R600 Patches: KCache kernel arguments and 24-bit arithmetic

Tom Stellard tom at stellard.net
Tue Jun 25 14:37:40 PDT 2013


Hi,

The attached patches clean up kernel argument handling for both R600 and
SI, and for R600 they make it possible to read arguments through the
KCache.  There are also patches that add support for the 24-bit arithmetic
instructions (MAD_UINT24, MAD_INT24, MUL_UINT24, and MUL_INT24).  In order
to test these patches you will also need to apply the corresponding Mesa
patches, which will be on the mailing list soon.
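
As a rough illustration of what the 24-bit instructions are for (my own
sketch, not one of the tests in this series): a 32-bit multiply whose
operands are known to fit in 24 bits is a candidate for MUL_UINT24, e.g.

define void @mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  %a24 = and i32 %a, 16777215 ; 0xFFFFFF masks keep both operands in 24 bits
  %b24 = and i32 %b, 16777215
  %mul = mul i32 %a24, %b24
  store i32 %mul, i32 addrspace(1)* %out
  ret void
}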

-Tom
-------------- next part --------------
>From deea49589cb27126d4699c0dc57f8ed9c2a9f563 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Mon, 17 Jun 2013 16:46:40 -0700
Subject: [PATCH 01/10] R600: Clean up extended load patterns

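The new az_extload fragments match both zero-extending and any-extending
loads, i.e. loads where the upper bits are either zero or unused.  As a
sketch (my example, not part of the test updates), IR like this should
produce an any-extend load once type legalization promotes the i8
arithmetic to i32:

define void @anyext_load(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
  %v = load i8 addrspace(1)* %in  ; becomes an i8 EXTLOAD after promotion
  %t = add i8 %v, 1
  %e = zext i8 %t to i32
  store i32 %e, i32 addrspace(1)* %out
  ret void
}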
---
 lib/Target/R600/AMDGPUInstructions.td | 26 ++++++++++++++++++++++++--
 lib/Target/R600/R600ISelLowering.cpp  |  8 ++++----
 lib/Target/R600/R600Instructions.td   | 16 ++++++++--------
 lib/Target/R600/SIInstructions.td     |  2 +-
 test/CodeGen/R600/short-args.ll       |  1 +
 5 files changed, 38 insertions(+), 15 deletions(-)

diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td
index 29df374..eadb368 100644
--- a/lib/Target/R600/AMDGPUInstructions.td
+++ b/lib/Target/R600/AMDGPUInstructions.td
@@ -86,11 +86,33 @@ def COND_NULL : PatLeaf <
 // Load/Store Pattern Fragments
 //===----------------------------------------------------------------------===//
 
-def zextloadi8_global : PatFrag<(ops node:$ptr), (zextloadi8 node:$ptr), [{
+def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+  LoadSDNode *L = cast<LoadSDNode>(N);
+  return L->getExtensionType() == ISD::ZEXTLOAD ||
+         L->getExtensionType() == ISD::EXTLOAD;
+}]>;
+
+def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+
+def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
+    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
+    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
+}]>;
+
+def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+
+def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
     return isGlobalLoad(dyn_cast<LoadSDNode>(N));
 }]>;
 
-def zextloadi8_constant : PatFrag<(ops node:$ptr), (zextloadi8 node:$ptr), [{
+def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
     return isGlobalLoad(dyn_cast<LoadSDNode>(N));
 }]>;
 
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index b898af1..66bfaca 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -70,10 +70,10 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
   setOperationAction(ISD::LOAD, MVT::i32, Custom);
   setOperationAction(ISD::LOAD, MVT::v2i32, Expand);
   setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
-  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Custom);
-  setLoadExtAction(ISD::EXTLOAD, MVT::i8, Custom);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Custom);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
+  setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Expand);
+  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);
   setOperationAction(ISD::STORE, MVT::i8, Custom);
   setOperationAction(ISD::STORE, MVT::i32, Custom);
   setOperationAction(ISD::STORE, MVT::v2i32, Expand);
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index d819d44..13277d9 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -314,8 +314,8 @@ class LoadParamFrag <PatFrag load_type> : PatFrag <
 >;
 
 def load_param : LoadParamFrag<load>;
-def load_param_zexti8 : LoadParamFrag<zextloadi8>;
-def load_param_zexti16 : LoadParamFrag<zextloadi16>;
+def load_param_exti8 : LoadParamFrag<az_extloadi8>;
+def load_param_exti16 : LoadParamFrag<az_extloadi16>;
 
 def isR600 : Predicate<"Subtarget.getGeneration() <= AMDGPUSubtarget::R700">;
 def isR700 : Predicate<"Subtarget.getGeneration() == AMDGPUSubtarget::R700">;
@@ -1386,11 +1386,11 @@ class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
 //===----------------------------------------------------------------------===//
 
 def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0,
-  [(set i32:$dst_gpr, (load_param_zexti8 ADDRVTX_READ:$src_gpr))]
+  [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
 >;
 
 def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0,
-  [(set i32:$dst_gpr, (load_param_zexti16 ADDRVTX_READ:$src_gpr))]
+  [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
 >;
 
 def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0,
@@ -1407,7 +1407,7 @@ def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
 
 // 8-bit reads
 def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1,
-  [(set i32:$dst_gpr, (zextloadi8_global ADDRVTX_READ:$src_gpr))]
+  [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
 >;
 
 // 32-bit reads
@@ -1729,11 +1729,11 @@ class VTX_READ_128_cm <bits<8> buffer_id, list<dag> pattern>
 // VTX Read from parameter memory space
 //===----------------------------------------------------------------------===//
 def VTX_READ_PARAM_8_cm : VTX_READ_8_cm <0,
-  [(set i32:$dst_gpr, (load_param_zexti8 ADDRVTX_READ:$src_gpr))]
+  [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
 >;
 
 def VTX_READ_PARAM_16_cm : VTX_READ_16_cm <0,
-  [(set i32:$dst_gpr, (load_param_zexti16 ADDRVTX_READ:$src_gpr))]
+  [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
 >;
 
 def VTX_READ_PARAM_32_cm : VTX_READ_32_cm <0,
@@ -1750,7 +1750,7 @@ def VTX_READ_PARAM_128_cm : VTX_READ_128_cm <0,
 
 // 8-bit reads
 def VTX_READ_GLOBAL_8_cm : VTX_READ_8_cm <1,
-  [(set i32:$dst_gpr, (zextloadi8_global ADDRVTX_READ:$src_gpr))]
+  [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
 >;
 
 // 32-bit reads
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 9c96c08..1124222 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -1637,7 +1637,7 @@ multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
 defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32,
                           global_load, constant_load>;
 defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32,
-                          zextloadi8_global, zextloadi8_constant>;
+                          az_extloadi8_global, az_extloadi8_constant>;
 defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, v2i32,
                           global_load, constant_load>;
 defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX4_ADDR64, v4i32,
diff --git a/test/CodeGen/R600/short-args.ll b/test/CodeGen/R600/short-args.ll
index b69e327..1e8ba24 100644
--- a/test/CodeGen/R600/short-args.ll
+++ b/test/CodeGen/R600/short-args.ll
@@ -1,4 +1,5 @@
 ; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s
 
 ; CHECK: @i8_arg
 ; CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
-- 
1.7.11.4

-------------- next part --------------
>From 7ea5d0a29656b478579b6045ec17388210254518 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Tue, 18 Jun 2013 16:28:34 -0700
Subject: [PATCH 02/10] R600: Use correct LoadExtType when lowering kernel
 arguments

---
 lib/Target/R600/R600ISelLowering.cpp | 10 +++++++++-
 test/CodeGen/R600/short-args.ll      | 19 +++++++++++++++++++
 2 files changed, 28 insertions(+), 1 deletion(-)

diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index 66bfaca..539ab07 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -1179,9 +1179,17 @@ SDValue R600TargetLowering::LowerFormalArguments(
     } else {
       ArgVT = VT;
     }
+
+    ISD::LoadExtType LoadType = ISD::EXTLOAD;
+    if (Ins[i].Flags.isZExt()) {
+      LoadType = ISD::ZEXTLOAD;
+    } else if (Ins[i].Flags.isSExt()) {
+      LoadType = ISD::SEXTLOAD;
+    }
+
     PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                                     AMDGPUAS::PARAM_I_ADDRESS);
-    SDValue Arg = DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, DAG.getRoot(),
+    SDValue Arg = DAG.getExtLoad(LoadType, DL, VT, DAG.getRoot(),
                                 DAG.getConstant(ParamOffsetBytes, MVT::i32),
                                        MachinePointerInfo(UndefValue::get(PtrTy)),
                                        ArgVT, false, false, ArgBytes);
diff --git a/test/CodeGen/R600/short-args.ll b/test/CodeGen/R600/short-args.ll
index 1e8ba24..8f4dc96 100644
--- a/test/CodeGen/R600/short-args.ll
+++ b/test/CodeGen/R600/short-args.ll
@@ -21,6 +21,15 @@ entry:
   ret void
 }
 
+; CHECK: @i8_sext_arg
+; CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+define void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
+entry:
+  %0 = sext i8 %in to i32
+  store i32 %0, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
 ; CHECK: @i16_arg
 ; CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
 
@@ -40,3 +49,13 @@ entry:
   store i32 %0, i32 addrspace(1)* %out, align 4
   ret void
 }
+
+; CHECK: @i16_sext_arg
+; CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+
+define void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
+entry:
+  %0 = sext i16 %in to i32
+  store i32 %0, i32 addrspace(1)* %out, align 4
+  ret void
+}
-- 
1.7.11.4

-------------- next part --------------
>From 6eb4be25b26f74fbe26c2172158f6e9576a6de91 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Wed, 19 Jun 2013 16:16:09 -0700
Subject: [PATCH 03/10] R600: Use the same compute kernel calling convention
 for all GPUs

A side effect of this is that the compiler now expects kernel arguments
to be 4-byte aligned.
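
For example (my reading of the new CC_AMDGPU_Kernel rules; offsets include
the 36-byte implicit-parameter header documented in the code), two i8
arguments now occupy separate 4-byte slots instead of being packed:

define void @two_i8_args(i32 addrspace(1)* %out, i8 %x, i8 %y) {
entry:
  ; %out is read from byte offset 36, %x from 40, %y from 44 (not 41)
  %xe = zext i8 %x to i32
  %ye = zext i8 %y to i32
  %sum = add i32 %xe, %ye
  store i32 %sum, i32 addrspace(1)* %out
  ret void
}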
---
 lib/Target/R600/AMDGPUCallingConv.td   | 14 ++++++++++----
 lib/Target/R600/AMDGPUISelLowering.cpp |  1 +
 lib/Target/R600/R600ISelLowering.cpp   | 21 +++++++++++++++------
 3 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/lib/Target/R600/AMDGPUCallingConv.td b/lib/Target/R600/AMDGPUCallingConv.td
index 826932b..54b2946 100644
--- a/lib/Target/R600/AMDGPUCallingConv.td
+++ b/lib/Target/R600/AMDGPUCallingConv.td
@@ -36,8 +36,8 @@ def CC_SI : CallingConv<[
 
 ]>;
 
-// Calling convention for SI compute kernels
-def CC_SI_Kernel : CallingConv<[
+// Calling convention for compute kernels
+def CC_AMDGPU_Kernel : CallingConv<[
   CCIfType<[v4i32, v4f32], CCAssignToStack <16, 4>>,
   CCIfType<[i64],          CCAssignToStack < 8, 4>>,
   CCIfType<[i32, f32],     CCAssignToStack < 4, 4>>,
@@ -46,8 +46,14 @@ def CC_SI_Kernel : CallingConv<[
 ]>;
 
 def CC_AMDGPU : CallingConv<[
-  CCIf<"State.getMachineFunction().getInfo<SIMachineFunctionInfo>()->"#
-       "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_SI_Kernel>>,
+  CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().getGeneration() == "
+       "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+       "State.getMachineFunction().getInfo<SIMachineFunctionInfo>()->"#
+       "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>,
+  CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>().getGeneration() < "
+       "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+       "State.getMachineFunction().getInfo<R600MachineFunctionInfo>()->"
+       "ShaderType == ShaderType::COMPUTE", CCDelegateTo<CC_AMDGPU_Kernel>>,
   CCIf<"State.getTarget().getSubtarget<AMDGPUSubtarget>()"#
        ".getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS", CCDelegateTo<CC_SI>>
 ]>;
diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp
index 6d73590..a0c6fc8 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -18,6 +18,7 @@
 #include "AMDGPURegisterInfo.h"
 #include "AMDGPUSubtarget.h"
 #include "AMDILIntrinsicInfo.h"
+#include "R600MachineFunctionInfo.h"
 #include "SIMachineFunctionInfo.h"
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/MachineFunction.h"
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index 539ab07..6ca3a12 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -16,6 +16,7 @@
 #include "R600Defines.h"
 #include "R600InstrInfo.h"
 #include "R600MachineFunctionInfo.h"
+#include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -1162,11 +1163,17 @@ SDValue R600TargetLowering::LowerFormalArguments(
                                       const SmallVectorImpl<ISD::InputArg> &Ins,
                                       SDLoc DL, SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
-  unsigned ParamOffsetBytes = 36;
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+                 getTargetMachine(), ArgLocs, *DAG.getContext());
+
+  AnalyzeFormalArguments(CCInfo, Ins);
+
   Function::const_arg_iterator FuncArg =
                             DAG.getMachineFunction().getFunction()->arg_begin();
   for (unsigned i = 0, e = Ins.size(); i < e; ++i, ++FuncArg) {
-    EVT VT = Ins[i].VT;
+    CCValAssign &VA = ArgLocs[i];
+    EVT VT = VA.getLocVT();
     Type *ArgType = FuncArg->getType();
     unsigned ArgSizeInBits = ArgType->isPointerTy() ?
                              32 : ArgType->getPrimitiveSizeInBits();
@@ -1189,12 +1196,14 @@ SDValue R600TargetLowering::LowerFormalArguments(
 
     PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                                     AMDGPUAS::PARAM_I_ADDRESS);
+
+    // The first 36 bytes of the input buffer contain information about
+    // thread group and global sizes.
     SDValue Arg = DAG.getExtLoad(LoadType, DL, VT, DAG.getRoot(),
-                                DAG.getConstant(ParamOffsetBytes, MVT::i32),
-                                       MachinePointerInfo(UndefValue::get(PtrTy)),
-                                       ArgVT, false, false, ArgBytes);
+                           DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32),
+                           MachinePointerInfo(UndefValue::get(PtrTy)),
+                           ArgVT, false, false, ArgBytes);
     InVals.push_back(Arg);
-    ParamOffsetBytes += ArgBytes;
   }
   return Chain;
 }
-- 
1.7.11.4

-------------- next part --------------
>From 6935b69be09fe75b3e35a9184ae880b703fe16af Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Wed, 19 Jun 2013 17:01:33 -0700
Subject: [PATCH 04/10] R600: Simplify assembly for KCache registers using the
 TableGen !add operator

Before:

MOV * T0.W, KC0[131-128].Y

After:

MOV * T0.W, KC0[3].Y
---
 lib/Target/R600/R600RegisterInfo.td | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/Target/R600/R600RegisterInfo.td b/lib/Target/R600/R600RegisterInfo.td
index a8b9b70..442b65a 100644
--- a/lib/Target/R600/R600RegisterInfo.td
+++ b/lib/Target/R600/R600RegisterInfo.td
@@ -47,10 +47,10 @@ foreach Index = 0-127 in {
 foreach Index = 159-128 in {
   foreach Chan = [ "X", "Y", "Z", "W" ] in {
     // 32-bit Temporary Registers
-    def KC0_#Index#_#Chan : R600RegWithChan <"KC0["#Index#"-128]."#Chan, Index, Chan>;
+    def KC0_#Index#_#Chan : R600RegWithChan <"KC0["#!add(Index,-128)#"]."#Chan, Index, Chan>;
   }
   // 128-bit Temporary Registers
-  def KC0_#Index#_XYZW : R600Reg_128 <"KC0["#Index#"-128].XYZW",
+  def KC0_#Index#_XYZW : R600Reg_128 <"KC0["#!add(Index, -128)#"].XYZW",
                                  [!cast<Register>("KC0_"#Index#"_X"),
                                   !cast<Register>("KC0_"#Index#"_Y"),
                                   !cast<Register>("KC0_"#Index#"_Z"),
@@ -62,10 +62,10 @@ foreach Index = 159-128 in {
 foreach Index = 191-160 in {
   foreach Chan = [ "X", "Y", "Z", "W" ] in {
     // 32-bit Temporary Registers
-    def KC1_#Index#_#Chan : R600RegWithChan <"KC1["#Index#"-160]."#Chan, Index, Chan>;
+    def KC1_#Index#_#Chan : R600RegWithChan <"KC1["#!add(Index,-160)#"]."#Chan, Index, Chan>;
   }
   // 128-bit Temporary Registers
-  def KC1_#Index#_XYZW : R600Reg_128 <"KC1["#Index#"-160].XYZW",
+  def KC1_#Index#_XYZW : R600Reg_128 <"KC1["#!add(Index, -160)#"].XYZW",
                                  [!cast<Register>("KC1_"#Index#"_X"),
                                   !cast<Register>("KC1_"#Index#"_Y"),
                                   !cast<Register>("KC1_"#Index#"_Z"),
-- 
1.7.11.4

-------------- next part --------------
>From c23833b740cd0a397ec9b3d510655388716d1204 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Thu, 20 Jun 2013 07:35:03 -0700
Subject: [PATCH 05/10] R600: Use KCache for kernel arguments

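For reviewers, the mapping this relies on: kernel arguments start at byte
36 of CONSTANT_BUFFER_0, each KC0 index covers one 16-byte line, and the
channel selects a dword within it, so an argument at byte offset N is read
as KC0[N / 16] with channel (N % 16) / 4.  For example:

define void @i32_arg(i32 addrspace(1)* %out, i32 %in) {
entry:
  ; %out is at byte 36 -> KC0[2].Y, %in at byte 40 -> KC0[2].Z
  store i32 %in, i32 addrspace(1)* %out
  ret void
}

Here the argument load becomes a MOV from KC0[2].Z, as the updated tests
show.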
---
 lib/Target/R600/AMDGPU.h                  |  6 +++++
 lib/Target/R600/AMDILISelDAGToDAG.cpp     | 21 +++-------------
 lib/Target/R600/R600ISelLowering.cpp      | 42 ++++++++-----------------------
 lib/Target/R600/R600Instructions.td       |  2 +-
 test/CodeGen/R600/128bit-kernel-args.ll   | 10 ++++++--
 test/CodeGen/R600/add.ll                  | 12 ++++-----
 test/CodeGen/R600/bfi_int.ll              |  4 +--
 test/CodeGen/R600/fdiv.ll                 | 26 +++++++++----------
 test/CodeGen/R600/literals.ll             | 12 ++++-----
 test/CodeGen/R600/llvm.AMDGPU.trunc.ll    |  2 +-
 test/CodeGen/R600/load.vec.ll             |  4 +--
 test/CodeGen/R600/loop-address.ll         | 12 ++++-----
 test/CodeGen/R600/rotr.ll                 |  2 +-
 test/CodeGen/R600/selectcc-opt.ll         |  1 -
 test/CodeGen/R600/set-dx10.ll             | 24 +++++++++---------
 test/CodeGen/R600/short-args.ll           | 12 ++++-----
 test/CodeGen/R600/unsupported-cc.ll       | 16 ++++++------
 test/CodeGen/R600/vtx-schedule.ll         | 14 ++++-------
 test/CodeGen/R600/work-item-intrinsics.ll | 18 ++++++-------
 19 files changed, 105 insertions(+), 135 deletions(-)

diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h
index f284291..b7c96d2 100644
--- a/lib/Target/R600/AMDGPU.h
+++ b/lib/Target/R600/AMDGPU.h
@@ -75,6 +75,12 @@ enum AddressSpaces {
   ADDRESS_NONE     = 5, ///< Address space for unknown memory.
   PARAM_D_ADDRESS  = 6, ///< Address space for direct addressible parameter memory (CONST0)
   PARAM_I_ADDRESS  = 7, ///< Address space for indirect addressible parameter memory (VTX1)
+
+  // Do not re-order the CONSTANT_BUFFER_* enums.  Several places depend on this
+  // order to be able to dynamically index a constant buffer, for example:
+  //
+  // ConstantBufferAS = CONSTANT_BUFFER_0 + CBIdx
+
   CONSTANT_BUFFER_0 = 8,
   CONSTANT_BUFFER_1 = 9,
   CONSTANT_BUFFER_2 = 10,
diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp
index 9f077b9..f79f4b9 100644
--- a/lib/Target/R600/AMDILISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDILISelDAGToDAG.cpp
@@ -560,24 +560,11 @@ bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
   return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
 }
 
-bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int cbID) const {
-  if (checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)) {
-    return true;
-  }
-
-  const DataLayout *DL = TM.getDataLayout();
-  MachineMemOperand *MMO = N->getMemOperand();
-  const Value *V = MMO->getValue();
-  const Value *BV = GetUnderlyingObject(V, DL, 0);
-  if (MMO
-      && MMO->getValue()
-      && ((V && dyn_cast<GlobalValue>(V))
-          || (BV && dyn_cast<GlobalValue>(
-                GetUnderlyingObject(MMO->getValue(), DL, 0))))) {
-    return checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS);
-  } else {
-    return false;
+bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
+  if (CbId == -1) {
+    return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
   }
+  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
 }
 
 bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index 6ca3a12..2cc87b1 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -71,10 +71,10 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
   setOperationAction(ISD::LOAD, MVT::i32, Custom);
   setOperationAction(ISD::LOAD, MVT::v2i32, Expand);
   setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Expand);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Custom);
+  setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Custom);
+  setLoadExtAction(ISD::ZEXTLOAD, MVT::i8, Custom);
+  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Custom);
   setOperationAction(ISD::STORE, MVT::i8, Custom);
   setOperationAction(ISD::STORE, MVT::i32, Custom);
   setOperationAction(ISD::STORE, MVT::v2i32, Expand);
@@ -725,7 +725,7 @@ SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
                                                    unsigned DwordOffset) const {
   unsigned ByteOffset = DwordOffset * 4;
   PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
-                                      AMDGPUAS::PARAM_I_ADDRESS);
+                                      AMDGPUAS::CONSTANT_BUFFER_0);
 
   // We shouldn't be using an offset wider than 16-bits for implicit parameters.
   assert(isInt<16>(ByteOffset));
@@ -1169,40 +1169,20 @@ SDValue R600TargetLowering::LowerFormalArguments(
 
   AnalyzeFormalArguments(CCInfo, Ins);
 
-  Function::const_arg_iterator FuncArg =
-                            DAG.getMachineFunction().getFunction()->arg_begin();
-  for (unsigned i = 0, e = Ins.size(); i < e; ++i, ++FuncArg) {
+  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
     CCValAssign &VA = ArgLocs[i];
     EVT VT = VA.getLocVT();
-    Type *ArgType = FuncArg->getType();
-    unsigned ArgSizeInBits = ArgType->isPointerTy() ?
-                             32 : ArgType->getPrimitiveSizeInBits();
-    unsigned ArgBytes = ArgSizeInBits >> 3;
-    EVT ArgVT;
-    if (ArgSizeInBits < VT.getSizeInBits()) {
-      assert(!ArgType->isFloatTy() &&
-             "Extending floating point arguments not supported yet");
-      ArgVT = MVT::getIntegerVT(ArgSizeInBits);
-    } else {
-      ArgVT = VT;
-    }
-
-    ISD::LoadExtType LoadType = ISD::EXTLOAD;
-    if (Ins[i].Flags.isZExt()) {
-      LoadType = ISD::ZEXTLOAD;
-    } else if (Ins[i].Flags.isSExt()) {
-      LoadType = ISD::SEXTLOAD;
-    }
 
     PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
-                                                    AMDGPUAS::PARAM_I_ADDRESS);
+                                                   AMDGPUAS::CONSTANT_BUFFER_0);
 
     // The first 36 bytes of the input buffer contain information about
     // thread group and global sizes.
-    SDValue Arg = DAG.getExtLoad(LoadType, DL, VT, DAG.getRoot(),
+    SDValue Arg = DAG.getLoad(VT, DL, Chain,
                            DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32),
-                           MachinePointerInfo(UndefValue::get(PtrTy)),
-                           ArgVT, false, false, ArgBytes);
+                           MachinePointerInfo(UndefValue::get(PtrTy)), false,
+                           false, false, 4); // 4 is the preferred alignment for
+                                             // the CONSTANT memory space.
     InVals.push_back(Arg);
   }
   return Chain;
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index 13277d9..31f98a8 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -310,7 +310,7 @@ class VTX_READ <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
 
 class LoadParamFrag <PatFrag load_type> : PatFrag <
   (ops node:$ptr), (load_type node:$ptr),
-  [{ return isParamLoad(dyn_cast<LoadSDNode>(N)); }]
+  [{ return isConstantLoad(dyn_cast<LoadSDNode>(N), 0); }]
 >;
 
 def load_param : LoadParamFrag<load>;
diff --git a/test/CodeGen/R600/128bit-kernel-args.ll b/test/CodeGen/R600/128bit-kernel-args.ll
index bd60385..bb2c015 100644
--- a/test/CodeGen/R600/128bit-kernel-args.ll
+++ b/test/CodeGen/R600/128bit-kernel-args.ll
@@ -2,7 +2,10 @@
 ; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
 
 ; R600-CHECK: @v4i32_kernel_arg
-; R600-CHECK: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 40
+; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR:[0-9]]].X, KC0[2].Z
+; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Y, KC0[2].W
+; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Z, KC0[3].X
+; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].W, KC0[3].Y
 ; SI-CHECK: @v4i32_kernel_arg
 ; SI-CHECK: BUFFER_STORE_DWORDX4
 define void @v4i32_kernel_arg(<4 x i32> addrspace(1)* %out, <4 x i32>  %in) {
@@ -12,7 +15,10 @@ entry:
 }
 
 ; R600-CHECK: @v4f32_kernel_arg
-; R600-CHECK: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 40
+; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR:[0-9]]].X, KC0[2].Z
+; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Y, KC0[2].W
+; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].Z, KC0[3].X
+; R600-CHECK-DAG: MOV {{[* ]*}}T[[GPR]].W, KC0[3].Y
 ; SI-CHECK: @v4f32_kernel_arg
 ; SI-CHECK: BUFFER_STORE_DWORDX4
 define void @v4f32_kernel_args(<4 x float> addrspace(1)* %out, <4 x float>  %in) {
diff --git a/test/CodeGen/R600/add.ll b/test/CodeGen/R600/add.ll
index dd590e5..16f7f97 100644
--- a/test/CodeGen/R600/add.ll
+++ b/test/CodeGen/R600/add.ll
@@ -2,8 +2,8 @@
 ; RUN: llc < %s -march=r600 -mcpu=verde | FileCheck --check-prefix=SI-CHECK %s
 
 ;EG-CHECK: @test2
-;EG-CHECK: ADD_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ADD_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], literal\.[xyzw]}}
+;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 
 ;SI-CHECK: @test2
 ;SI-CHECK: V_ADD_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
@@ -19,10 +19,10 @@ define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
 }
 
 ;EG-CHECK: @test4
-;EG-CHECK: ADD_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ADD_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ADD_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;EG-CHECK: ADD_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 
 ;SI-CHECK: @test4
 ;SI-CHECK: V_ADD_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
diff --git a/test/CodeGen/R600/bfi_int.ll b/test/CodeGen/R600/bfi_int.ll
index a1bd09a..b001ad0 100644
--- a/test/CodeGen/R600/bfi_int.ll
+++ b/test/CodeGen/R600/bfi_int.ll
@@ -36,8 +36,8 @@ entry:
 ; SHA-256 Ma function
 ; ((x & z) | (y & (x | z)))
 ; R600-CHECK: @bfi_sha256_ma
-; R600-CHECK: XOR_INT * [[DST:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; R600-CHECK: BFI_INT * {{T[0-9]+\.[XYZW]}}, {{[[DST]]|PV\.[XYZW]}}, {{T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: XOR_INT * [[DST:T[0-9]+\.[XYZW]]],
+; R600-CHECK: BFI_INT * {{T[0-9]+\.[XYZW]}}, {{[[DST]]|PV\.[XYZW]}}, KC0[3].X, KC0[2].W
 ; SI-CHECK: V_XOR_B32_e64 [[DST:VGPR[0-9]+]], {{[SV]GPR[0-9]+, [SV]GPR[0-9]+}}
 ; SI-CHECK: V_BFI_B32 {{VGPR[0-9]+}}, [[DST]], {{[SV]GPR[0-9]+, [SV]GPR[0-9]+}}
 
diff --git a/test/CodeGen/R600/fdiv.ll b/test/CodeGen/R600/fdiv.ll
index 003590b..b39960d 100644
--- a/test/CodeGen/R600/fdiv.ll
+++ b/test/CodeGen/R600/fdiv.ll
@@ -1,19 +1,17 @@
 ;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
 
-;CHECK: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}
+;CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
+;CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}
+;CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}
+;CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
+;CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
+;CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}
+;CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
 
-define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
-  %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
-  %a = load <4 x float> addrspace(1) * %in
-  %b = load <4 x float> addrspace(1) * %b_ptr
-  %result = fdiv <4 x float> %a, %b
-  store <4 x float> %result, <4 x float> addrspace(1)* %out
+define void @test(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) {
+entry:
+  %0 = fdiv <4 x float> %a, %b
+  store <4 x float> %0, <4 x float> addrspace(1)* %out
   ret void
 }
diff --git a/test/CodeGen/R600/literals.ll b/test/CodeGen/R600/literals.ll
index 21e5d4c..fbb77b3 100644
--- a/test/CodeGen/R600/literals.ll
+++ b/test/CodeGen/R600/literals.ll
@@ -2,12 +2,12 @@
 
 ; Test using an integer literal constant.
 ; Generated ASM should be:
-; ADD_INT REG literal.x, 5
+; ADD_INT KC0[2].Z literal.x, 5
 ; or
-; ADD_INT literal.x REG, 5
+; ADD_INT literal.x KC0[2].Z, 5
 
 ; CHECK: @i32_literal
-; CHECK: ADD_INT * {{[A-Z0-9,. ]*}}literal.x
+; CHECK: ADD_INT * T{{[0-9]\.[XYZW]}}, KC0[2].Z, literal.x
 ; CHECK-NEXT: 5
 define void @i32_literal(i32 addrspace(1)* %out, i32 %in) {
 entry:
@@ -18,12 +18,12 @@ entry:
 
 ; Test using a float literal constant.
 ; Generated ASM should be:
-; ADD REG literal.x, 5.0
+; ADD KC0[2].Z literal.x, 5.0
 ; or
-; ADD literal.x REG, 5.0
+; ADD literal.x KC0[2].Z, 5.0
 
 ; CHECK: @float_literal
-; CHECK: ADD * {{[A-Z0-9,. ]*}}literal.x
+; CHECK: ADD * T{{[0-9]\.[XYZW]}}, KC0[2].Z, literal.x
 ; CHECK-NEXT: 1084227584(5.0
 define void @float_literal(float addrspace(1)* %out, float %in) {
 entry:
diff --git a/test/CodeGen/R600/llvm.AMDGPU.trunc.ll b/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
index cdc03f8..7627783 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -march=r600 -mcpu=verde | FileCheck --check-prefix=SI-CHECK %s
 
 ; R600-CHECK: @amdgpu_trunc
-; R600-CHECK: TRUNC * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: TRUNC * T{{[0-9]+\.[XYZW]}}, KC0[2].Z
 ; SI-CHECK: @amdgpu_trunc
 ; SI-CHECK: V_TRUNC_F32
 
diff --git a/test/CodeGen/R600/load.vec.ll b/test/CodeGen/R600/load.vec.ll
index da1149a..b3d6349 100644
--- a/test/CodeGen/R600/load.vec.ll
+++ b/test/CodeGen/R600/load.vec.ll
@@ -3,8 +3,8 @@
 
 ; load a v2i32 value from the global address space.
 ; EG-CHECK: @load_v2i32
-; EG-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 4
-; EG-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
+; EG-CHECK-DAG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 4
+; EG-CHECK-DAG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
 ; SI-CHECK: @load_v2i32
 ; SI-CHECK: BUFFER_LOAD_DWORDX2 VGPR{{[0-9]+}}
 define void @load_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
diff --git a/test/CodeGen/R600/loop-address.ll b/test/CodeGen/R600/loop-address.ll
index 8a5458b..638aa29 100644
--- a/test/CodeGen/R600/loop-address.ll
+++ b/test/CodeGen/R600/loop-address.ll
@@ -1,13 +1,11 @@
 ;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
 
-;CHECK: TEX
 ;CHECK: ALU_PUSH
-;CHECK: JUMP @4
-;CHECK: ELSE @16
-;CHECK: TEX
-;CHECK: LOOP_START_DX10 @15
-;CHECK: LOOP_BREAK @14
-;CHECK: POP @16
+;CHECK: JUMP @2
+;CHECK: ELSE @13
+;CHECK: LOOP_START_DX10 @12
+;CHECK: LOOP_BREAK @11
+;CHECK: POP @13
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
 target triple = "r600--"
diff --git a/test/CodeGen/R600/rotr.ll b/test/CodeGen/R600/rotr.ll
index 960d30d..5c4c4e9 100644
--- a/test/CodeGen/R600/rotr.ll
+++ b/test/CodeGen/R600/rotr.ll
@@ -19,7 +19,7 @@ entry:
 ; R600-CHECK: @rotl
 ; R600-CHECK: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
 ; R600-CHECK-NEXT: 32
-; R600-CHECK: BIT_ALIGN_INT {{\** T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PV.[XYZW]}}
+; R600-CHECK: BIT_ALIGN_INT {{\** T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].Z, PV.{{[XYZW]}}
 
 ; SI-CHECK: @rotl
 ; SI-CHECK: V_SUB_I32_e64 [[DST:VGPR[0-9]+]], 32, {{[SV]GPR[0-9]+}}
diff --git a/test/CodeGen/R600/selectcc-opt.ll b/test/CodeGen/R600/selectcc-opt.ll
index 7f568fc..7e2d559 100644
--- a/test/CodeGen/R600/selectcc-opt.ll
+++ b/test/CodeGen/R600/selectcc-opt.ll
@@ -29,7 +29,6 @@ ENDIF:
 ; for the icmp instruction
 
 ; CHECK: @test_b
-; CHECK: VTX_READ
 ; CHECK: SET{{[GTEQN]+}}_DX10
 ; CHECK-NEXT: PRED_
 ; CHECK-NEXT: ALU clause starting
diff --git a/test/CodeGen/R600/set-dx10.ll b/test/CodeGen/R600/set-dx10.ll
index eb6e9d2..291a7bd 100644
--- a/test/CodeGen/R600/set-dx10.ll
+++ b/test/CodeGen/R600/set-dx10.ll
@@ -5,7 +5,7 @@
 ; SET*DX10 instructions.
 
 ; CHECK: @fcmp_une_select_fptosi
-; CHECK: SETNE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK: SETNE_DX10 * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_une_select_fptosi(i32 addrspace(1)* %out, float %in) {
 entry:
@@ -18,7 +18,7 @@ entry:
 }
 
 ; CHECK: @fcmp_une_select_i32
-; CHECK: SETNE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK: SETNE_DX10 * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_une_select_i32(i32 addrspace(1)* %out, float %in) {
 entry:
@@ -29,7 +29,7 @@ entry:
 }
 
 ; CHECK: @fcmp_ueq_select_fptosi
-; CHECK: SETE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK: SETE_DX10 * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_ueq_select_fptosi(i32 addrspace(1)* %out, float %in) {
 entry:
@@ -42,7 +42,7 @@ entry:
 }
 
 ; CHECK: @fcmp_ueq_select_i32
-; CHECK: SETE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK: SETE_DX10 * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_ueq_select_i32(i32 addrspace(1)* %out, float %in) {
 entry:
@@ -53,7 +53,7 @@ entry:
 }
 
 ; CHECK: @fcmp_ugt_select_fptosi
-; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_ugt_select_fptosi(i32 addrspace(1)* %out, float %in) {
 entry:
@@ -66,7 +66,7 @@ entry:
 }
 
 ; CHECK: @fcmp_ugt_select_i32
-; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_ugt_select_i32(i32 addrspace(1)* %out, float %in) {
 entry:
@@ -77,7 +77,7 @@ entry:
 }
 
 ; CHECK: @fcmp_uge_select_fptosi
-; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_uge_select_fptosi(i32 addrspace(1)* %out, float %in) {
 entry:
@@ -90,7 +90,7 @@ entry:
 }
 
 ; CHECK: @fcmp_uge_select_i32
-; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, literal.x,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_uge_select_i32(i32 addrspace(1)* %out, float %in) {
 entry:
@@ -101,7 +101,7 @@ entry:
 }
 
 ; CHECK: @fcmp_ule_select_fptosi
-; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}},
+; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_ule_select_fptosi(i32 addrspace(1)* %out, float %in) {
 entry:
@@ -114,7 +114,7 @@ entry:
 }
 
 ; CHECK: @fcmp_ule_select_i32
-; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}},
+; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_ule_select_i32(i32 addrspace(1)* %out, float %in) {
 entry:
@@ -125,7 +125,7 @@ entry:
 }
 
 ; CHECK: @fcmp_ult_select_fptosi
-; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}},
+; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_ult_select_fptosi(i32 addrspace(1)* %out, float %in) {
 entry:
@@ -138,7 +138,7 @@ entry:
 }
 
 ; CHECK: @fcmp_ult_select_i32
-; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}},
+; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z,
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @fcmp_ult_select_i32(i32 addrspace(1)* %out, float %in) {
 entry:
diff --git a/test/CodeGen/R600/short-args.ll b/test/CodeGen/R600/short-args.ll
index 8f4dc96..69a8412 100644
--- a/test/CodeGen/R600/short-args.ll
+++ b/test/CodeGen/R600/short-args.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s
 
 ; CHECK: @i8_arg
-; CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
 
 define void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) nounwind {
 entry:
@@ -12,7 +12,7 @@ entry:
 }
 
 ; CHECK: @i8_zext_arg
-; CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
 
 define void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
 entry:
@@ -22,7 +22,7 @@ entry:
 }
 
 ; CHECK: @i8_sext_arg
-; CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
 define void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
 entry:
   %0 = sext i8 %in to i32
@@ -31,7 +31,7 @@ entry:
 }
 
 ; CHECK: @i16_arg
-; CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
 
 define void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) nounwind {
 entry:
@@ -41,7 +41,7 @@ entry:
 }
 
 ; CHECK: @i16_zext_arg
-; CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
 
 define void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
 entry:
@@ -51,7 +51,7 @@ entry:
 }
 
 ; CHECK: @i16_sext_arg
-; CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
 
 define void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
 entry:
diff --git a/test/CodeGen/R600/unsupported-cc.ll b/test/CodeGen/R600/unsupported-cc.ll
index b311f4c..cf29833 100644
--- a/test/CodeGen/R600/unsupported-cc.ll
+++ b/test/CodeGen/R600/unsupported-cc.ll
@@ -3,7 +3,7 @@
 ; These tests are for condition codes that are not supported by the hardware
 
 ; CHECK: @slt
-; CHECK: SETGT_INT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK: SETGT_INT * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
 ; CHECK-NEXT: 5(7.006492e-45)
 define void @slt(i32 addrspace(1)* %out, i32 %in) {
 entry:
@@ -14,7 +14,7 @@ entry:
 }
 
 ; CHECK: @ult_i32
-; CHECK: SETGT_UINT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK: SETGT_UINT * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
 ; CHECK-NEXT: 5(7.006492e-45)
 define void @ult_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
@@ -25,7 +25,7 @@ entry:
 }
 
 ; CHECK: @ult_float
-; CHECK: SETGT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK: SETGT * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @ult_float(float addrspace(1)* %out, float %in) {
 entry:
@@ -36,7 +36,7 @@ entry:
 }
 
 ; CHECK: @olt
-; CHECK: SETGT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK: SETGT * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
 ;CHECK-NEXT: 1084227584(5.000000e+00)
 define void @olt(float addrspace(1)* %out, float %in) {
 entry:
@@ -47,7 +47,7 @@ entry:
 }
 
 ; CHECK: @sle
-; CHECK: SETGT_INT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK: SETGT_INT * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
 ; CHECK-NEXT: 6(8.407791e-45)
 define void @sle(i32 addrspace(1)* %out, i32 %in) {
 entry:
@@ -58,7 +58,7 @@ entry:
 }
 
 ; CHECK: @ule_i32
-; CHECK: SETGT_UINT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK: SETGT_UINT * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
 ; CHECK-NEXT: 6(8.407791e-45)
 define void @ule_i32(i32 addrspace(1)* %out, i32 %in) {
 entry:
@@ -69,7 +69,7 @@ entry:
 }
 
 ; CHECK: @ule_float
-; CHECK: SETGE * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK: SETGE * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
 ; CHECK-NEXT: 1084227584(5.000000e+00)
 define void @ule_float(float addrspace(1)* %out, float %in) {
 entry:
@@ -80,7 +80,7 @@ entry:
 }
 
 ; CHECK: @ole
-; CHECK: SETGE * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK: SETGE * T{{[0-9]+\.[XYZW]}}, literal.x, KC0[2].Z
 ; CHECK-NEXT:1084227584(5.000000e+00)
 define void @ole(float addrspace(1)* %out, float %in) {
 entry:
diff --git a/test/CodeGen/R600/vtx-schedule.ll b/test/CodeGen/R600/vtx-schedule.ll
index a0c79e3..97d37ed 100644
--- a/test/CodeGen/R600/vtx-schedule.ll
+++ b/test/CodeGen/R600/vtx-schedule.ll
@@ -6,17 +6,13 @@
 
 ; CHECK: @test
 ; CHECK: Fetch clause
-; CHECK_VTX_READ_32 [[IN0:T[0-9]+\.X]], [[IN0]], 40
-; CHECK_VTX_READ_32 [[IN1:T[0-9]+\.X]], [[IN1]], 44
-; CHECK: Fetch clause
 ; CHECK_VTX_READ_32 [[IN0:T[0-9]+\.X]], [[IN0]], 0
+; CHECK: Fetch clause
 ; CHECK_VTX_READ_32 [[IN1:T[0-9]+\.X]], [[IN1]], 0
-define void @test(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in0, i32 addrspace(1)* nocapture %in1) {
+define void @test(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* addrspace(1)* nocapture %in0) {
 entry:
-  %0 = load i32 addrspace(1)* %in0, align 4
-  %1 = load i32 addrspace(1)* %in1, align 4
-  %cmp.i = icmp slt i32 %0, %1
-  %cond.i = select i1 %cmp.i, i32 %0, i32 %1
-  store i32 %cond.i, i32 addrspace(1)* %out, align 4
+  %0 = load i32 addrspace(1)* addrspace(1)* %in0
+  %1 = load i32 addrspace(1)* %0
+  store i32 %1, i32 addrspace(1)* %out
   ret void
 }
diff --git a/test/CodeGen/R600/work-item-intrinsics.ll b/test/CodeGen/R600/work-item-intrinsics.ll
index 46e3e54..7998983 100644
--- a/test/CodeGen/R600/work-item-intrinsics.ll
+++ b/test/CodeGen/R600/work-item-intrinsics.ll
@@ -3,7 +3,7 @@
 
 ; R600-CHECK: @ngroups_x
 ; R600-CHECK: RAT_WRITE_CACHELESS_32_eg [[VAL:T[0-9]+\.X]]
-; R600-CHECK: VTX_READ_32 [[VAL]], [[VAL]], 0
+; R600-CHECK: MOV * [[VAL]], KC0[0].X
 ; SI-CHECK: @ngroups_x
 ; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 0
 ; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
@@ -17,7 +17,7 @@ entry:
 
 ; R600-CHECK: @ngroups_y
 ; R600-CHECK: RAT_WRITE_CACHELESS_32_eg [[VAL:T[0-9]+\.X]]
-; R600-CHECK: VTX_READ_32 [[VAL]], [[VAL]], 4
+; R600-CHECK: MOV * [[VAL]], KC0[0].Y
 ; SI-CHECK: @ngroups_y
 ; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 1
 ; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
@@ -31,7 +31,7 @@ entry:
 
 ; R600-CHECK: @ngroups_z
 ; R600-CHECK: RAT_WRITE_CACHELESS_32_eg [[VAL:T[0-9]+\.X]]
-; R600-CHECK: VTX_READ_32 [[VAL]], [[VAL]], 8
+; R600-CHECK: MOV * [[VAL]], KC0[0].Z
 ; SI-CHECK: @ngroups_z
 ; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 2
 ; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
@@ -45,7 +45,7 @@ entry:
 
 ; R600-CHECK: @global_size_x
 ; R600-CHECK: RAT_WRITE_CACHELESS_32_eg [[VAL:T[0-9]+\.X]]
-; R600-CHECK: VTX_READ_32 [[VAL]], [[VAL]], 12
+; R600-CHECK: MOV * [[VAL]], KC0[0].W
 ; SI-CHECK: @global_size_x
 ; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 3
 ; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
@@ -59,7 +59,7 @@ entry:
 
 ; R600-CHECK: @global_size_y
 ; R600-CHECK: RAT_WRITE_CACHELESS_32_eg [[VAL:T[0-9]+\.X]]
-; R600-CHECK: VTX_READ_32 [[VAL]], [[VAL]], 16
+; R600-CHECK: MOV * [[VAL]], KC0[1].X
 ; SI-CHECK: @global_size_y
 ; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 4
 ; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
@@ -73,7 +73,7 @@ entry:
 
 ; R600-CHECK: @global_size_z
 ; R600-CHECK: RAT_WRITE_CACHELESS_32_eg [[VAL:T[0-9]+\.X]]
-; R600-CHECK: VTX_READ_32 [[VAL]], [[VAL]], 20
+; R600-CHECK: MOV * [[VAL]], KC0[1].Y
 ; SI-CHECK: @global_size_z
 ; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 5
 ; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
@@ -87,7 +87,7 @@ entry:
 
 ; R600-CHECK: @local_size_x
 ; R600-CHECK: RAT_WRITE_CACHELESS_32_eg [[VAL:T[0-9]+\.X]]
-; R600-CHECK: VTX_READ_32 [[VAL]], [[VAL]], 24
+; R600-CHECK: MOV * [[VAL]], KC0[1].Z
 ; SI-CHECK: @local_size_x
 ; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 6
 ; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
@@ -101,7 +101,7 @@ entry:
 
 ; R600-CHECK: @local_size_y
 ; R600-CHECK: RAT_WRITE_CACHELESS_32_eg [[VAL:T[0-9]+\.X]]
-; R600-CHECK: VTX_READ_32 [[VAL]], [[VAL]], 28
+; R600-CHECK: MOV * [[VAL]], KC0[1].W
 ; SI-CHECK: @local_size_y
 ; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 7
 ; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
@@ -115,7 +115,7 @@ entry:
 
 ; R600-CHECK: @local_size_z
 ; R600-CHECK: RAT_WRITE_CACHELESS_32_eg [[VAL:T[0-9]+\.X]]
-; R600-CHECK: VTX_READ_32 [[VAL]], [[VAL]], 32
+; R600-CHECK: MOV * [[VAL]], KC0[2].X
 ; SI-CHECK: @local_size_z
 ; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 8
 ; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
-- 
1.7.11.4

-------------- next part --------------
>From 4082baadf915915fb86b4875402216071f145f99 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Thu, 20 Jun 2013 13:36:47 -0700
Subject: [PATCH 06/10] R600: Move CONST_ADDRESS folding into
 AMDGPUDAGToDAGISel::Select()

This increases the number of opportunities we have for folding.  With the
previous implementation, we were unable to fold into any instruction other
than the first when multiple instructions were selected from a single
SDNode.
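
A sketch of the situation (hypothetical; not reduced to a test): with the
folding now done while selecting the CONST_ADDRESS node itself, every
already-selected machine-level user of a kernel argument is a folding
candidate, e.g. both users of %in below:

define void @fold_all_users(i32 addrspace(1)* %out, i32 %in) {
entry:
  %a = add i32 %in, 7
  %b = xor i32 %a, %in
  store i32 %b, i32 addrspace(1)* %out
  ret void
}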
---
 lib/Target/R600/AMDILISelDAGToDAG.cpp | 142 ++++++++++++++++++++++------------
 lib/Target/R600/R600ISelLowering.cpp  |  24 ++++++
 lib/Target/R600/R600InstrInfo.cpp     |  36 +++++++++
 lib/Target/R600/R600InstrInfo.h       |   7 ++
 test/CodeGen/R600/bfi_int.ll          |   2 +-
 5 files changed, 161 insertions(+), 50 deletions(-)

diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp
index f79f4b9..fed044a 100644
--- a/lib/Target/R600/AMDILISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDILISelDAGToDAG.cpp
@@ -50,7 +50,7 @@ public:
 private:
   inline SDValue getSmallIPtrImm(unsigned Imm);
   bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
-                   const R600InstrInfo *TII, std::vector<unsigned> Cst);
+                   const R600InstrInfo *TII);
   bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
   bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
 
@@ -158,12 +158,100 @@ bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
 }
 
 SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
+  const R600InstrInfo *TII =
+                      static_cast<const R600InstrInfo*>(TM.getInstrInfo());
   unsigned int Opc = N->getOpcode();
   if (N->isMachineOpcode()) {
     return NULL;   // Already selected.
   }
   switch (Opc) {
   default: break;
+  case AMDGPUISD::CONST_ADDRESS: {
+    for (SDNode::use_iterator I = N->use_begin(), Next = llvm::next(I);
+                              I != SDNode::use_end(); I = Next) {
+      Next = llvm::next(I);
+      if (!I->isMachineOpcode()) {
+        continue;
+      }
+      unsigned Opcode = I->getMachineOpcode();
+      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
+      int SrcIdx = I.getOperandNo();
+      int SelIdx;
+      // Unlike MachineInstrs, SDNodes do not have results in their operand
+      // list, so we need to increment the SrcIdx, since
+      // R600InstrInfo::getOperandIdx is based on the MachineInstr indices.
+      if (HasDst) {
+        SrcIdx++;
+      }
+
+      SelIdx = TII->getSelIdx(I->getMachineOpcode(), SrcIdx);
+      if (SelIdx < 0) {
+        continue;
+      }
+
+      SDValue CstOffset;
+      if (N->getValueType(0).isVector() ||
+          !SelectGlobalValueConstantOffset(N->getOperand(0), CstOffset))
+        continue;
+
+      // Gather constants values
+      int SrcIndices[] = {
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
+      };
+      std::vector<unsigned> Consts;
+      for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
+        int OtherSrcIdx = SrcIndices[i];
+        int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
+        if (OtherSrcIdx < 0 || OtherSelIdx < 0) {
+          continue;
+        }
+        if (HasDst) {
+          OtherSrcIdx--;
+          OtherSelIdx--;
+        }
+        if (RegisterSDNode *Reg =
+                         dyn_cast<RegisterSDNode>(I->getOperand(OtherSrcIdx))) {
+          if (Reg->getReg() == AMDGPU::ALU_CONST) {
+            ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(I->getOperand(OtherSelIdx));
+            Consts.push_back(Cst->getZExtValue());
+          }
+        }
+      }
+
+      ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
+      Consts.push_back(Cst->getZExtValue());
+      if (!TII->fitsConstReadLimitations(Consts))
+        continue;
+
+      // Convert back to SDNode indices
+      if (HasDst) {
+        SrcIdx--;
+        SelIdx--;
+      }
+      std::vector<SDValue> Ops;
+      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
+        if (i == SrcIdx) {
+          Ops.push_back(CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32));
+        } else if (i == SelIdx) {
+          Ops.push_back(CstOffset);
+        } else {
+          Ops.push_back(I->getOperand(i));
+        }
+      }
+      CurDAG->UpdateNodeOperands(*I, Ops.data(), Ops.size());
+    }
+    break;
+  }
   case ISD::BUILD_VECTOR: {
     const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
     if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
@@ -224,7 +312,6 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
     if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
       break;
     }
-    const R600InstrInfo *TII = static_cast<const R600InstrInfo*>(TM.getInstrInfo());
 
     uint64_t ImmValue = 0;
     unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
@@ -337,7 +424,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
     if (Result && Result->isMachineOpcode() &&
         !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
         && TII->isALUInstr(Result->getMachineOpcode())) {
-      // Fold FNEG/FABS/CONST_ADDRESS
+      // Fold FNEG/FABS
       // TODO: Isel can generate multiple MachineInst, we need to recursively
       // parse Result
       bool IsModified = false;
@@ -377,24 +464,8 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
 }
 
 bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg,
-                                     SDValue &Abs, const R600InstrInfo *TII,
-                                     std::vector<unsigned> Consts) {
+                                     SDValue &Abs, const R600InstrInfo *TII) {
   switch (Src.getOpcode()) {
-  case AMDGPUISD::CONST_ADDRESS: {
-    SDValue CstOffset;
-    if (Src.getValueType().isVector() ||
-        !SelectGlobalValueConstantOffset(Src.getOperand(0), CstOffset))
-      return false;
-
-    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
-    Consts.push_back(Cst->getZExtValue());
-    if (!TII->fitsConstReadLimitations(Consts))
-      return false;
-
-    Src = CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32);
-    Sel = CstOffset;
-    return true;
-    }
   case ISD::FNEG:
     Src = Src.getOperand(0);
     Neg = CurDAG->getTargetConstant(1, MVT::i32);
@@ -436,19 +507,6 @@ bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
     -1
   };
 
-  // Gather constants values
-  std::vector<unsigned> Consts;
-  for (unsigned j = 0; j < 3; j++) {
-    int SrcIdx = OperandIdx[j];
-    if (SrcIdx < 0)
-      break;
-    if (RegisterSDNode *Reg = dyn_cast<RegisterSDNode>(Ops[SrcIdx - 1])) {
-      if (Reg->getReg() == AMDGPU::ALU_CONST) {
-        ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Ops[SelIdx[j] - 1]);
-        Consts.push_back(Cst->getZExtValue());
-      }
-    }
-  }
 
   for (unsigned i = 0; i < 3; i++) {
     if (OperandIdx[i] < 0)
@@ -458,7 +516,7 @@ bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
     SDValue &Neg = Ops[NegIdx[i] - 1];
     SDValue FakeAbs;
     SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
-    if (FoldOperand(Src, Sel, Neg, Abs, TII, Consts))
+    if (FoldOperand(Src, Sel, Neg, Abs, TII))
       return true;
   }
   return false;
@@ -507,20 +565,6 @@ bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
     TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
   };
 
-  // Gather constants values
-  std::vector<unsigned> Consts;
-  for (unsigned j = 0; j < 8; j++) {
-    int SrcIdx = OperandIdx[j];
-    if (SrcIdx < 0)
-      break;
-    if (RegisterSDNode *Reg = dyn_cast<RegisterSDNode>(Ops[SrcIdx - 1])) {
-      if (Reg->getReg() == AMDGPU::ALU_CONST) {
-        ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Ops[SelIdx[j] - 1]);
-        Consts.push_back(Cst->getZExtValue());
-      }
-    }
-  }
-
   for (unsigned i = 0; i < 8; i++) {
     if (OperandIdx[i] < 0)
       return false;
@@ -528,7 +572,7 @@ bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
     SDValue &Sel = Ops[SelIdx[i] - 1];
     SDValue &Neg = Ops[NegIdx[i] - 1];
     SDValue &Abs = Ops[AbsIdx[i] - 1];
-    if (FoldOperand(Src, Sel, Neg, Abs, TII, Consts))
+    if (FoldOperand(Src, Sel, Neg, Abs, TII))
       return true;
   }
   return false;
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index 2cc87b1..62f1b08 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -1104,6 +1104,30 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
     return DAG.getMergeValues(MergedValues, 2, DL);
   }
 
+  // For most operations, returning SDValue() will result in the node being
+  // expanded by the DAG Legalizer.  This is not the case for ISD::LOAD, so
+  // we need to manually expand loads that may be legal in some address spaces
+  // and illegal in others.  SEXT loads from CONSTANT_BUFFER_0 are supported
+  // for compute shaders, since the data is sign-extended when it is uploaded
+  // to the buffer.  However, SEXT loads from other address spaces are not
+  // supported, so we need to expand them here.
+  if (LoadNode->getExtensionType() == ISD::SEXTLOAD) {
+    EVT MemVT = LoadNode->getMemoryVT();
+    assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8));
+    SDValue ShiftAmount =
+          DAG.getConstant(VT.getSizeInBits() - MemVT.getSizeInBits(), MVT::i32);
+    SDValue NewLoad = DAG.getExtLoad(ISD::EXTLOAD, DL, VT, Chain, Ptr,
+                                  LoadNode->getPointerInfo(), MemVT,
+                                  LoadNode->isVolatile(),
+                                  LoadNode->isNonTemporal(),
+                                  LoadNode->getAlignment());
+    SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, NewLoad, ShiftAmount);
+    SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Shl, ShiftAmount);
+
+    SDValue MergedValues[2] = { Sra, Chain };
+    return DAG.getMergeValues(MergedValues, 2, DL);
+  }
+
   if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
     return SDValue();
   }
diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp
index d17425f..3e455c8 100644
--- a/lib/Target/R600/R600InstrInfo.cpp
+++ b/lib/Target/R600/R600InstrInfo.cpp
@@ -165,6 +165,42 @@ bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
          usesTextureCache(MI->getOpcode());
 }
 
+int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
+  static const unsigned OpTable[] = {
+    AMDGPU::OpName::src0,
+    AMDGPU::OpName::src1,
+    AMDGPU::OpName::src2
+  };
+
+  assert (SrcNum < 3);
+  return getOperandIdx(Opcode, OpTable[SrcNum]);
+}
+
+#define SRC_SEL_ROWS 11
+int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
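+  // Each row pairs a source operand with its companion _sel operand; the
+  // scan below finds the row whose source operand index matches SrcIdx.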
+  static const unsigned SrcSelTable[SRC_SEL_ROWS][2] = {
+    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
+    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
+    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
+    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
+    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
+    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
+    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
+    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
+    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
+    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
+    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
+  };
+
+  for (unsigned i = 0; i < SRC_SEL_ROWS; ++i) {
+    if (getOperandIdx(Opcode, SrcSelTable[i][0]) == (int)SrcIdx) {
+      return getOperandIdx(Opcode, SrcSelTable[i][1]);
+    }
+  }
+  return -1;
+}
+#undef SRC_SEL_ROWS
+
 SmallVector<std::pair<MachineOperand *, int64_t>, 3>
 R600InstrInfo::getSrcs(MachineInstr *MI) const {
   SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h
index f06abf6..f839c6b 100644
--- a/lib/Target/R600/R600InstrInfo.h
+++ b/lib/Target/R600/R600InstrInfo.h
@@ -72,6 +72,13 @@ namespace llvm {
   bool usesTextureCache(unsigned Opcode) const;
   bool usesTextureCache(const MachineInstr *MI) const;
 
+  /// \returns The operand index for the given source number.  Legal values
+  /// for SrcNum are 0, 1, and 2.
+  int getSrcIdx(unsigned Opcode, unsigned SrcNum) const;
+  /// \returns The operand index for the Sel operand given an index to one
+  /// of the instruction's src operands.
+  int getSelIdx(unsigned Opcode, unsigned SrcIdx) const;
+
   /// \returns a pair for each src of an ALU instructions.
   /// The first member of a pair is the register id.
   /// If register is ALU_CONST, second member is SEL.
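
A minimal sketch (not part of the patch) of how the two new helpers compose;
the wrapper function name here is hypothetical:

  // Walk an instruction's sources and find each one's companion selector
  // operand.  Assumes the usual R600 backend headers are available.
  static void visitSelOperands(llvm::MachineInstr *MI,
                               const llvm::R600InstrInfo *TII) {
    for (unsigned SrcNum = 0; SrcNum < 3; ++SrcNum) {
      int SrcIdx = TII->getSrcIdx(MI->getOpcode(), SrcNum);
      if (SrcIdx < 0)
        break;                    // Opcode has fewer than three sources.
      int SelIdx = TII->getSelIdx(MI->getOpcode(), SrcIdx);
      if (SelIdx < 0)
        continue;                 // This source has no _sel operand.
      llvm::MachineOperand &Sel = MI->getOperand(SelIdx);
      (void)Sel;                  // Inspect or rewrite the selector here.
    }
  }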
diff --git a/test/CodeGen/R600/bfi_int.ll b/test/CodeGen/R600/bfi_int.ll
index b001ad0..501c556 100644
--- a/test/CodeGen/R600/bfi_int.ll
+++ b/test/CodeGen/R600/bfi_int.ll
@@ -36,7 +36,7 @@ entry:
 ; SHA-256 Ma function
 ; ((x & z) | (y & (x | z)))
 ; R600-CHECK: @bfi_sha256_ma
-; R600-CHECK: XOR_INT * [[DST:T[0-9]+\.[XYZW]]],
+; R600-CHECK: XOR_INT * [[DST:T[0-9]+\.[XYZW]]], KC0[2].Z, KC0[2].W
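+; (The XOR operands are kernel arguments read directly through kcache
+; constant bank 0.)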
 ; R600-CHECK: BFI_INT * {{T[0-9]+\.[XYZW]}}, {{[[DST]]|PV\.[XYZW]}}, KC0[3].X, KC0[2].W
 ; SI-CHECK: V_XOR_B32_e64 [[DST:VGPR[0-9]+]], {{[SV]GPR[0-9]+, [SV]GPR[0-9]+}}
 ; SI-CHECK: V_BFI_B32 {{VGPR[0-9]+}}, [[DST]], {{[SV]GPR[0-9]+, [SV]GPR[0-9]+}}
-- 
1.7.11.4

-------------- next part --------------
>From baeb289bd44c5471a8a48d7b6a0dd61b6016562b Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Mon, 17 Jun 2013 08:29:16 -0700
Subject: [PATCH 07/10] R600: Rename AMDILISelDAGToDAG.cpp ->
 AMDGPUISelDAGToDAG.cpp

---
 lib/Target/R600/AMDGPUISelDAGToDAG.cpp | 784 +++++++++++++++++++++++++++++++++
 lib/Target/R600/AMDILISelDAGToDAG.cpp  | 784 ---------------------------------
 lib/Target/R600/CMakeLists.txt         |   2 +-
 3 files changed, 785 insertions(+), 785 deletions(-)
 create mode 100644 lib/Target/R600/AMDGPUISelDAGToDAG.cpp
 delete mode 100644 lib/Target/R600/AMDILISelDAGToDAG.cpp

diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
new file mode 100644
index 0000000..fed044a
--- /dev/null
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -0,0 +1,784 @@
+//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ---===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//==-----------------------------------------------------------------------===//
+//
+/// \file
+/// \brief Defines an instruction selector for the AMDGPU target.
+//
+//===----------------------------------------------------------------------===//
+#include "AMDGPUInstrInfo.h"
+#include "AMDGPUISelLowering.h" // For AMDGPUISD
+#include "AMDGPURegisterInfo.h"
+#include "R600InstrInfo.h"
+#include "SIISelLowering.h"
+#include "llvm/ADT/ValueMap.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/Support/Compiler.h"
+#include <list>
+#include <queue>
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Instruction Selector Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// AMDGPU specific code to select AMDGPU machine instructions for
+/// SelectionDAG operations.
+class AMDGPUDAGToDAGISel : public SelectionDAGISel {
+  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
+  // make the right decision when generating code for different targets.
+  const AMDGPUSubtarget &Subtarget;
+public:
+  AMDGPUDAGToDAGISel(TargetMachine &TM);
+  virtual ~AMDGPUDAGToDAGISel();
+
+  SDNode *Select(SDNode *N);
+  virtual const char *getPassName() const;
+  virtual void PostprocessISelDAG();
+
+private:
+  inline SDValue getSmallIPtrImm(unsigned Imm);
+  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
+                   const R600InstrInfo *TII);
+  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
+  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
+
+  // Complex pattern selectors
+  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
+  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
+  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
+
+  static bool checkType(const Value *ptr, unsigned int addrspace);
+
+  static bool isGlobalStore(const StoreSDNode *N);
+  static bool isPrivateStore(const StoreSDNode *N);
+  static bool isLocalStore(const StoreSDNode *N);
+  static bool isRegionStore(const StoreSDNode *N);
+
+  bool isCPLoad(const LoadSDNode *N) const;
+  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
+  bool isGlobalLoad(const LoadSDNode *N) const;
+  bool isParamLoad(const LoadSDNode *N) const;
+  bool isPrivateLoad(const LoadSDNode *N) const;
+  bool isLocalLoad(const LoadSDNode *N) const;
+  bool isRegionLoad(const LoadSDNode *N) const;
+
+  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
+  bool SelectGlobalValueVariableOffset(SDValue Addr,
+      SDValue &BaseReg, SDValue& Offset);
+  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
+  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
+
+  // Include the pieces autogenerated from the target description.
+#include "AMDGPUGenDAGISel.inc"
+};
+}  // end anonymous namespace
+
+/// \brief This pass converts a legalized DAG into an AMDGPU-specific
+/// DAG, ready for instruction scheduling.
+FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
+  return new AMDGPUDAGToDAGISel(TM);
+}
+
+AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
+  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
+}
+
+AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
+}
+
+SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
+  return CurDAG->getTargetConstant(Imm, MVT::i32);
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDRParam(
+    SDValue Addr, SDValue& R1, SDValue& R2) {
+
+  if (Addr.getOpcode() == ISD::FrameIndex) {
+    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+      R2 = CurDAG->getTargetConstant(0, MVT::i32);
+    } else {
+      R1 = Addr;
+      R2 = CurDAG->getTargetConstant(0, MVT::i32);
+    }
+  } else if (Addr.getOpcode() == ISD::ADD) {
+    R1 = Addr.getOperand(0);
+    R2 = Addr.getOperand(1);
+  } else {
+    R1 = Addr;
+    R2 = CurDAG->getTargetConstant(0, MVT::i32);
+  }
+  return true;
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
+  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+      Addr.getOpcode() == ISD::TargetGlobalAddress) {
+    return false;
+  }
+  return SelectADDRParam(Addr, R1, R2);
+}
+
+
+bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
+  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+      Addr.getOpcode() == ISD::TargetGlobalAddress) {
+    return false;
+  }
+
+  if (Addr.getOpcode() == ISD::FrameIndex) {
+    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
+      R2 = CurDAG->getTargetConstant(0, MVT::i64);
+    } else {
+      R1 = Addr;
+      R2 = CurDAG->getTargetConstant(0, MVT::i64);
+    }
+  } else if (Addr.getOpcode() == ISD::ADD) {
+    R1 = Addr.getOperand(0);
+    R2 = Addr.getOperand(1);
+  } else {
+    R1 = Addr;
+    R2 = CurDAG->getTargetConstant(0, MVT::i64);
+  }
+  return true;
+}
+
+SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
+  const R600InstrInfo *TII =
+                      static_cast<const R600InstrInfo*>(TM.getInstrInfo());
+  unsigned int Opc = N->getOpcode();
+  if (N->isMachineOpcode()) {
+    return NULL;   // Already selected.
+  }
+  switch (Opc) {
+  default: break;
+  case AMDGPUISD::CONST_ADDRESS: {
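+    // Try to fold this constant-buffer address into each machine-opcode
+    // use as an ALU_CONST source, so the value is read through the
+    // constant (kcache) path instead of via an explicit fetch.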
+    for (SDNode::use_iterator I = N->use_begin(), Next = llvm::next(I);
+                              I != SDNode::use_end(); I = Next) {
+      Next = llvm::next(I);
+      if (!I->isMachineOpcode()) {
+        continue;
+      }
+      unsigned Opcode = I->getMachineOpcode();
+      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
+      int SrcIdx = I.getOperandNo();
+      int SelIdx;
+      // Unlike MachineInstrs, SDNodes do not have results in their operand
+      // list, so we need to increment the SrcIdx, since
+      // R600InstrInfo::getOperandIdx is based on the MachineInstr indices.
+      if (HasDst) {
+        SrcIdx++;
+      }
+
+      SelIdx = TII->getSelIdx(I->getMachineOpcode(), SrcIdx);
+      if (SelIdx < 0) {
+        continue;
+      }
+
+      SDValue CstOffset;
+      if (N->getValueType(0).isVector() ||
+          !SelectGlobalValueConstantOffset(N->getOperand(0), CstOffset))
+        continue;
+
+      // Gather the constant values already used by this instruction's
+      // sources.
+      int SrcIndices[] = {
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
+        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
+      };
+      std::vector<unsigned> Consts;
+      for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
+        int OtherSrcIdx = SrcIndices[i];
+        int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
+        if (OtherSrcIdx < 0 || OtherSelIdx < 0) {
+          continue;
+        }
+        if (HasDst) {
+          OtherSrcIdx--;
+          OtherSelIdx--;
+        }
+        if (RegisterSDNode *Reg =
+                         dyn_cast<RegisterSDNode>(I->getOperand(OtherSrcIdx))) {
+          if (Reg->getReg() == AMDGPU::ALU_CONST) {
+            ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(I->getOperand(OtherSelIdx));
+            Consts.push_back(Cst->getZExtValue());
+          }
+        }
+      }
+
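+      // Account for the constant we are about to fold before checking the
+      // hardware's constant-read limits.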
+      ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
+      Consts.push_back(Cst->getZExtValue());
+      if (!TII->fitsConstReadLimitations(Consts))
+        continue;
+
+      // Convert back to SDNode indices
+      if (HasDst) {
+        SrcIdx--;
+        SelIdx--;
+      }
+      std::vector<SDValue> Ops;
+      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
+        if (i == SrcIdx) {
+          Ops.push_back(CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32));
+        } else if (i == SelIdx) {
+          Ops.push_back(CstOffset);
+        } else {
+          Ops.push_back(I->getOperand(i));
+        }
+      }
+      CurDAG->UpdateNodeOperands(*I, Ops.data(), Ops.size());
+    }
+    break;
+  }
+  case ISD::BUILD_VECTOR: {
+    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
+      break;
+    }
+    // BUILD_VECTOR is usually lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
+    // that adds a 128 bits reg copy when going through TwoAddressInstructions
+    // pass. We want to avoid 128 bits copies as much as possible because they
+    // can't be bundled by our scheduler.
+    SDValue RegSeqArgs[9] = {
+      CurDAG->getTargetConstant(AMDGPU::R600_Reg128RegClassID, MVT::i32),
+      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
+      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
+      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub2, MVT::i32),
+      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub3, MVT::i32)
+    };
+    bool IsRegSeq = true;
+    for (unsigned i = 0; i < N->getNumOperands(); i++) {
+      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
+        IsRegSeq = false;
+        break;
+      }
+      RegSeqArgs[2 * i + 1] = N->getOperand(i);
+    }
+    if (!IsRegSeq)
+      break;
+    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
+        RegSeqArgs, 2 * N->getNumOperands() + 1);
+  }
+  case ISD::BUILD_PAIR: {
+    SDValue RC, SubReg0, SubReg1;
+    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
+      break;
+    }
+    if (N->getValueType(0) == MVT::i128) {
+      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
+      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
+      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
+    } else if (N->getValueType(0) == MVT::i64) {
+      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
+      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
+      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
+    } else {
+      llvm_unreachable("Unhandled value type for BUILD_PAIR");
+    }
+    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
+                            N->getOperand(1), SubReg1 };
+    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
+                                  SDLoc(N), N->getValueType(0), Ops);
+  }
+
+  case ISD::ConstantFP:
+  case ISD::Constant: {
+    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+    // XXX: Custom immediate lowering not implemented yet.  Instead we use
+    // pseudo instructions defined in SIInstructions.td
+    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
+      break;
+    }
+
+    uint64_t ImmValue = 0;
+    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
+
+    if (N->getOpcode() == ISD::ConstantFP) {
+      // XXX: 64-bit Immediates not supported yet
+      assert(N->getValueType(0) != MVT::f64);
+
+      ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N);
+      APFloat Value = C->getValueAPF();
+      float FloatValue = Value.convertToFloat();
+      if (FloatValue == 0.0) {
+        ImmReg = AMDGPU::ZERO;
+      } else if (FloatValue == 0.5) {
+        ImmReg = AMDGPU::HALF;
+      } else if (FloatValue == 1.0) {
+        ImmReg = AMDGPU::ONE;
+      } else {
+        ImmValue = Value.bitcastToAPInt().getZExtValue();
+      }
+    } else {
+      // XXX: 64-bit Immediates not supported yet
+      assert(N->getValueType(0) != MVT::i64);
+
+      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
+      if (C->getZExtValue() == 0) {
+        ImmReg = AMDGPU::ZERO;
+      } else if (C->getZExtValue() == 1) {
+        ImmReg = AMDGPU::ONE_INT;
+      } else {
+        ImmValue = C->getZExtValue();
+      }
+    }
+
+    for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
+                              Use != SDNode::use_end(); Use = Next) {
+      Next = llvm::next(Use);
+      std::vector<SDValue> Ops;
+      for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
+        Ops.push_back(Use->getOperand(i));
+      }
+
+      if (!Use->isMachineOpcode()) {
+          if (ImmReg == AMDGPU::ALU_LITERAL_X) {
+            // We can only use literal constants (e.g. AMDGPU::ZERO,
+            // AMDGPU::ONE, etc) in machine opcodes.
+            continue;
+          }
+      } else {
+        if (!TII->isALUInstr(Use->getMachineOpcode()) ||
+            (TII->get(Use->getMachineOpcode()).TSFlags &
+            R600_InstFlag::VECTOR)) {
+          continue;
+        }
+
+        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
+                                        AMDGPU::OpName::literal);
+        assert(ImmIdx != -1);
+
+        // Subtract one from ImmIdx, because the DST operand is usually index
+        // 0 for MachineInstrs, but we have no DST in the Ops vector.
+        ImmIdx--;
+
+        // Check that we aren't already using an immediate.
+        // XXX: It's possible for an instruction to have more than one
+        // immediate operand, but this is not supported yet.
+        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
+          ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
+          assert(C);
+
+          if (C->getZExtValue() != 0) {
+            // This instruction is already using an immediate.
+            continue;
+          }
+
+          // Set the immediate value
+          Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
+        }
+      }
+      // Set the immediate register
+      Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);
+
+      CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
+    }
+    break;
+  }
+  }
+  SDNode *Result = SelectCode(N);
+
+  // Fold operands of selected node
+
+  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
+    const R600InstrInfo *TII =
+        static_cast<const R600InstrInfo*>(TM.getInstrInfo());
+    if (Result && Result->isMachineOpcode() && Result->getMachineOpcode() == AMDGPU::DOT_4) {
+      bool IsModified = false;
+      do {
+        std::vector<SDValue> Ops;
+        for(SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
+            I != E; ++I)
+          Ops.push_back(*I);
+        IsModified = FoldDotOperands(Result->getMachineOpcode(), TII, Ops);
+        if (IsModified) {
+          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
+        }
+      } while (IsModified);
+
+    }
+    if (Result && Result->isMachineOpcode() &&
+        !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
+        && TII->isALUInstr(Result->getMachineOpcode())) {
+      // Fold FNEG/FABS
+      // TODO: Isel can generate multiple MachineInst, we need to recursively
+      // parse Result
+      bool IsModified = false;
+      do {
+        std::vector<SDValue> Ops;
+        for(SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
+            I != E; ++I)
+          Ops.push_back(*I);
+        IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
+        if (IsModified) {
+          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
+        }
+      } while (IsModified);
+
+      // If the node has a single use which is CLAMP_R600, fold it.
+      if (Result->hasOneUse() && Result->isMachineOpcode()) {
+        SDNode *PotentialClamp = *Result->use_begin();
+        if (PotentialClamp->isMachineOpcode() &&
+            PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
+          unsigned ClampIdx =
+            TII->getOperandIdx(Result->getMachineOpcode(), AMDGPU::OpName::clamp);
+          std::vector<SDValue> Ops;
+          unsigned NumOp = Result->getNumOperands();
+          for (unsigned i = 0; i < NumOp; ++i) {
+            Ops.push_back(Result->getOperand(i));
+          }
+          Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
+          Result = CurDAG->SelectNodeTo(PotentialClamp,
+              Result->getMachineOpcode(), PotentialClamp->getVTList(),
+              Ops.data(), NumOp);
+        }
+      }
+    }
+  }
+
+  return Result;
+}
+
+bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg,
+                                     SDValue &Abs, const R600InstrInfo *TII) {
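+  // Fold a source modifier (negation, absolute value, or a no-op bitcast)
+  // from the DAG into the instruction's per-source modifier operands.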
+  switch (Src.getOpcode()) {
+  case ISD::FNEG:
+    Src = Src.getOperand(0);
+    Neg = CurDAG->getTargetConstant(1, MVT::i32);
+    return true;
+  case ISD::FABS:
+    if (!Abs.getNode())
+      return false;
+    Src = Src.getOperand(0);
+    Abs = CurDAG->getTargetConstant(1, MVT::i32);
+    return true;
+  case ISD::BITCAST:
+    Src = Src.getOperand(0);
+    return true;
+  default:
+    return false;
+  }
+}
+
+bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
+    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
+  int OperandIdx[] = {
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
+  };
+  int SelIdx[] = {
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_sel)
+  };
+  int NegIdx[] = {
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
+  };
+  int AbsIdx[] = {
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
+    -1
+  };
+
+  for (unsigned i = 0; i < 3; i++) {
+    if (OperandIdx[i] < 0)
+      return false;
+    SDValue &Src = Ops[OperandIdx[i] - 1];
+    SDValue &Sel = Ops[SelIdx[i] - 1];
+    SDValue &Neg = Ops[NegIdx[i] - 1];
+    SDValue FakeAbs;
+    SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
+    if (FoldOperand(Src, Sel, Neg, Abs, TII))
+      return true;
+  }
+  return false;
+}
+
+bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
+    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
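+  // DOT_4 carries a full set of per-channel (X/Y/Z/W) source operands for
+  // both vector sources; try to fold a modifier on any one of them.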
+  int OperandIdx[] = {
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
+  };
+  int SelIdx[] = {
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_W),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_W)
+  };
+  int NegIdx[] = {
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
+  };
+  int AbsIdx[] = {
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
+    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
+  };
+
+  for (unsigned i = 0; i < 8; i++) {
+    if (OperandIdx[i] < 0)
+      return false;
+    SDValue &Src = Ops[OperandIdx[i] - 1];
+    SDValue &Sel = Ops[SelIdx[i] - 1];
+    SDValue &Neg = Ops[NegIdx[i] - 1];
+    SDValue &Abs = Ops[AbsIdx[i] - 1];
+    if (FoldOperand(Src, Sel, Neg, Abs, TII))
+      return true;
+  }
+  return false;
+}
+
+bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
+  if (!ptr) {
+    return false;
+  }
+  Type *ptrType = ptr->getType();
+  return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
+}
+
+bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
+  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
+  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
+          && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
+          && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
+}
+
+bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
+  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
+  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
+  if (CbId == -1) {
+    return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
+  }
+  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
+}
+
+bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
+  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
+  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isLocalLoad(const  LoadSDNode *N) const {
+  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isRegionLoad(const  LoadSDNode *N) const {
+  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
+  MachineMemOperand *MMO = N->getMemOperand();
+  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
+    if (MMO) {
+      const Value *V = MMO->getValue();
+      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
+      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
+  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
+    // Check to make sure we are not a constant pool load or a constant load
+    // that is marked as a private load
+    if (isCPLoad(N) || isConstantLoad(N, -1)) {
+      return false;
+    }
+  }
+  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
+      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
+      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
+      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
+      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
+      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
+    return true;
+  }
+  return false;
+}
+
+const char *AMDGPUDAGToDAGISel::getPassName() const {
+  return "AMDGPU DAG->DAG Pattern Instruction Selection";
+}
+
+#ifdef DEBUGTMP
+#undef INT64_C
+#endif
+#undef DEBUGTMP
+
+///==== AMDGPU Functions ====///
+
+bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
+    SDValue& IntPtr) {
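+  // Constant addresses are byte offsets, while constant selectors count
+  // 32-bit dwords, hence the division by 4.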
+  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
+    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
+    return true;
+  }
+  return false;
+}
+
+bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
+    SDValue& BaseReg, SDValue &Offset) {
+  if (!dyn_cast<ConstantSDNode>(Addr)) {
+    BaseReg = Addr;
+    Offset = CurDAG->getIntPtrConstant(0, true);
+    return true;
+  }
+  return false;
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
+                                           SDValue &Offset) {
+  ConstantSDNode * IMMOffset;
+
+  if (Addr.getOpcode() == ISD::ADD
+      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+      && isInt<16>(IMMOffset->getZExtValue())) {
+
+      Base = Addr.getOperand(0);
+      Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
+      return true;
+  // If the pointer address is constant, we can move it to the offset field.
+  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
+             && isInt<16>(IMMOffset->getZExtValue())) {
+    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
+                                  SDLoc(CurDAG->getEntryNode()),
+                                  AMDGPU::ZERO, MVT::i32);
+    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
+    return true;
+  }
+
+  // Default case, no offset
+  Base = Addr;
+  Offset = CurDAG->getTargetConstant(0, MVT::i32);
+  return true;
+}
+
+bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
+                                            SDValue &Offset) {
+  ConstantSDNode *C;
+
+  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
+    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
+    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
+  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
+            (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
+    Base = Addr.getOperand(0);
+    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
+  } else {
+    Base = Addr;
+    Offset = CurDAG->getTargetConstant(0, MVT::i32);
+  }
+
+  return true;
+}
+
+void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
+
+  if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+    return;
+  }
+
+  // Go over all selected nodes and try to fold them a bit more
+  const AMDGPUTargetLowering& Lowering =
+    (*(const AMDGPUTargetLowering*)getTargetLowering());
+  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
+       E = CurDAG->allnodes_end(); I != E; ++I) {
+
+    SDNode *Node = I;
+    switch (Node->getOpcode()) {
+    // Fix the register class in copy to CopyToReg nodes - ISel will always
+    // use SReg classes for 64-bit copies, but this is not always what we want.
+    case ISD::CopyToReg: {
+      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
+      SDValue Val = Node->getOperand(2);
+      const TargetRegisterClass *RC = RegInfo->getRegClass(Reg);
+      if (RC != &AMDGPU::SReg_64RegClass) {
+        continue;
+      }
+
+      if (!Val.getNode()->isMachineOpcode() ||
+          Val.getNode()->getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
+        continue;
+      }
+
+      const MCInstrDesc Desc = TM.getInstrInfo()->get(Val.getNode()->getMachineOpcode());
+      const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+      RegInfo->setRegClass(Reg, TRI->getRegClass(Desc.OpInfo[0].RegClass));
+      continue;
+    }
+    }
+
+    MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
+    if (!MachineNode)
+      continue;
+
+    SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
+    if (ResNode != Node) {
+      ReplaceUses(Node, ResNode);
+    }
+  }
+}
diff --git a/lib/Target/R600/AMDILISelDAGToDAG.cpp b/lib/Target/R600/AMDILISelDAGToDAG.cpp
deleted file mode 100644
index fed044a..0000000
--- a/lib/Target/R600/AMDILISelDAGToDAG.cpp
+++ /dev/null
@@ -1,784 +0,0 @@
-//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Defines an instruction selector for the AMDGPU target.
-//
-//===----------------------------------------------------------------------===//
-#include "AMDGPUInstrInfo.h"
-#include "AMDGPUISelLowering.h" // For AMDGPUISD
-#include "AMDGPURegisterInfo.h"
-#include "R600InstrInfo.h"
-#include "SIISelLowering.h"
-#include "llvm/ADT/ValueMap.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Support/Compiler.h"
-#include <list>
-#include <queue>
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Instruction Selector Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-/// AMDGPU specific code to select AMDGPU machine instructions for
-/// SelectionDAG operations.
-class AMDGPUDAGToDAGISel : public SelectionDAGISel {
-  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
-  // make the right decision when generating code for different targets.
-  const AMDGPUSubtarget &Subtarget;
-public:
-  AMDGPUDAGToDAGISel(TargetMachine &TM);
-  virtual ~AMDGPUDAGToDAGISel();
-
-  SDNode *Select(SDNode *N);
-  virtual const char *getPassName() const;
-  virtual void PostprocessISelDAG();
-
-private:
-  inline SDValue getSmallIPtrImm(unsigned Imm);
-  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
-                   const R600InstrInfo *TII);
-  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
-  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
-
-  // Complex pattern selectors
-  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
-  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
-  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
-
-  static bool checkType(const Value *ptr, unsigned int addrspace);
-
-  static bool isGlobalStore(const StoreSDNode *N);
-  static bool isPrivateStore(const StoreSDNode *N);
-  static bool isLocalStore(const StoreSDNode *N);
-  static bool isRegionStore(const StoreSDNode *N);
-
-  bool isCPLoad(const LoadSDNode *N) const;
-  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
-  bool isGlobalLoad(const LoadSDNode *N) const;
-  bool isParamLoad(const LoadSDNode *N) const;
-  bool isPrivateLoad(const LoadSDNode *N) const;
-  bool isLocalLoad(const LoadSDNode *N) const;
-  bool isRegionLoad(const LoadSDNode *N) const;
-
-  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
-  bool SelectGlobalValueVariableOffset(SDValue Addr,
-      SDValue &BaseReg, SDValue& Offset);
-  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
-  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
-
-  // Include the pieces autogenerated from the target description.
-#include "AMDGPUGenDAGISel.inc"
-};
-}  // end anonymous namespace
-
-/// \brief This pass converts a legalized DAG into a AMDGPU-specific
-// DAG, ready for instruction scheduling.
-FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM
-                                       ) {
-  return new AMDGPUDAGToDAGISel(TM);
-}
-
-AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
-  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
-}
-
-AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
-}
-
-SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
-  return CurDAG->getTargetConstant(Imm, MVT::i32);
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDRParam(
-    SDValue Addr, SDValue& R1, SDValue& R2) {
-
-  if (Addr.getOpcode() == ISD::FrameIndex) {
-    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
-      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
-      R2 = CurDAG->getTargetConstant(0, MVT::i32);
-    } else {
-      R1 = Addr;
-      R2 = CurDAG->getTargetConstant(0, MVT::i32);
-    }
-  } else if (Addr.getOpcode() == ISD::ADD) {
-    R1 = Addr.getOperand(0);
-    R2 = Addr.getOperand(1);
-  } else {
-    R1 = Addr;
-    R2 = CurDAG->getTargetConstant(0, MVT::i32);
-  }
-  return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
-  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
-      Addr.getOpcode() == ISD::TargetGlobalAddress) {
-    return false;
-  }
-  return SelectADDRParam(Addr, R1, R2);
-}
-
-
-bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
-  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
-      Addr.getOpcode() == ISD::TargetGlobalAddress) {
-    return false;
-  }
-
-  if (Addr.getOpcode() == ISD::FrameIndex) {
-    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
-      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
-      R2 = CurDAG->getTargetConstant(0, MVT::i64);
-    } else {
-      R1 = Addr;
-      R2 = CurDAG->getTargetConstant(0, MVT::i64);
-    }
-  } else if (Addr.getOpcode() == ISD::ADD) {
-    R1 = Addr.getOperand(0);
-    R2 = Addr.getOperand(1);
-  } else {
-    R1 = Addr;
-    R2 = CurDAG->getTargetConstant(0, MVT::i64);
-  }
-  return true;
-}
-
-SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
-  const R600InstrInfo *TII =
-                      static_cast<const R600InstrInfo*>(TM.getInstrInfo());
-  unsigned int Opc = N->getOpcode();
-  if (N->isMachineOpcode()) {
-    return NULL;   // Already selected.
-  }
-  switch (Opc) {
-  default: break;
-  case AMDGPUISD::CONST_ADDRESS: {
-    for (SDNode::use_iterator I = N->use_begin(), Next = llvm::next(I);
-                              I != SDNode::use_end(); I = Next) {
-      Next = llvm::next(I);
-      if (!I->isMachineOpcode()) {
-        continue;
-      }
-      unsigned Opcode = I->getMachineOpcode();
-      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
-      int SrcIdx = I.getOperandNo();
-      int SelIdx;
-      // Unlike MachineInstrs, SDNodes do not have results in their operand
-      // list, so we need to increment the SrcIdx, since
-      // R600InstrInfo::getOperandIdx is based on the MachineInstr indices.
-      if (HasDst) {
-        SrcIdx++;
-      }
-
-      SelIdx = TII->getSelIdx(I->getMachineOpcode(), SrcIdx);
-      if (SelIdx < 0) {
-        continue;
-      }
-
-      SDValue CstOffset;
-      if (N->getValueType(0).isVector() ||
-          !SelectGlobalValueConstantOffset(N->getOperand(0), CstOffset))
-        continue;
-
-      // Gather constants values
-      int SrcIndices[] = {
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
-      };
-      std::vector<unsigned> Consts;
-      for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
-        int OtherSrcIdx = SrcIndices[i];
-        int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
-        if (OtherSrcIdx < 0 || OtherSelIdx < 0) {
-          continue;
-        }
-        if (HasDst) {
-          OtherSrcIdx--;
-          OtherSelIdx--;
-        }
-        if (RegisterSDNode *Reg =
-                         dyn_cast<RegisterSDNode>(I->getOperand(OtherSrcIdx))) {
-          if (Reg->getReg() == AMDGPU::ALU_CONST) {
-            ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(I->getOperand(OtherSelIdx));
-            Consts.push_back(Cst->getZExtValue());
-          }
-        }
-      }
-
-      ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
-      Consts.push_back(Cst->getZExtValue());
-      if (!TII->fitsConstReadLimitations(Consts))
-        continue;
-
-      // Convert back to SDNode indices
-      if (HasDst) {
-        SrcIdx--;
-        SelIdx--;
-      }
-      std::vector<SDValue> Ops;
-      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
-        if (i == SrcIdx) {
-          Ops.push_back(CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32));
-        } else if (i == SelIdx) {
-          Ops.push_back(CstOffset);
-        } else {
-          Ops.push_back(I->getOperand(i));
-        }
-      }
-      CurDAG->UpdateNodeOperands(*I, Ops.data(), Ops.size());
-    }
-    break;
-  }
-  case ISD::BUILD_VECTOR: {
-    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
-    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
-      break;
-    }
-    // BUILD_VECTOR is usually lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
-    // that adds a 128 bits reg copy when going through TwoAddressInstructions
-    // pass. We want to avoid 128 bits copies as much as possible because they
-    // can't be bundled by our scheduler.
-    SDValue RegSeqArgs[9] = {
-      CurDAG->getTargetConstant(AMDGPU::R600_Reg128RegClassID, MVT::i32),
-      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
-      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
-      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub2, MVT::i32),
-      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub3, MVT::i32)
-    };
-    bool IsRegSeq = true;
-    for (unsigned i = 0; i < N->getNumOperands(); i++) {
-      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
-        IsRegSeq = false;
-        break;
-      }
-      RegSeqArgs[2 * i + 1] = N->getOperand(i);
-    }
-    if (!IsRegSeq)
-      break;
-    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
-        RegSeqArgs, 2 * N->getNumOperands() + 1);
-  }
-  case ISD::BUILD_PAIR: {
-    SDValue RC, SubReg0, SubReg1;
-    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
-    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
-      break;
-    }
-    if (N->getValueType(0) == MVT::i128) {
-      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
-      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
-      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
-    } else if (N->getValueType(0) == MVT::i64) {
-      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
-      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
-      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
-    } else {
-      llvm_unreachable("Unhandled value type for BUILD_PAIR");
-    }
-    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
-                            N->getOperand(1), SubReg1 };
-    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
-                                  SDLoc(N), N->getValueType(0), Ops);
-  }
-
-  case ISD::ConstantFP:
-  case ISD::Constant: {
-    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
-    // XXX: Custom immediate lowering not implemented yet.  Instead we use
-    // pseudo instructions defined in SIInstructions.td
-    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
-      break;
-    }
-
-    uint64_t ImmValue = 0;
-    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
-
-    if (N->getOpcode() == ISD::ConstantFP) {
-      // XXX: 64-bit Immediates not supported yet
-      assert(N->getValueType(0) != MVT::f64);
-
-      ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N);
-      APFloat Value = C->getValueAPF();
-      float FloatValue = Value.convertToFloat();
-      if (FloatValue == 0.0) {
-        ImmReg = AMDGPU::ZERO;
-      } else if (FloatValue == 0.5) {
-        ImmReg = AMDGPU::HALF;
-      } else if (FloatValue == 1.0) {
-        ImmReg = AMDGPU::ONE;
-      } else {
-        ImmValue = Value.bitcastToAPInt().getZExtValue();
-      }
-    } else {
-      // XXX: 64-bit Immediates not supported yet
-      assert(N->getValueType(0) != MVT::i64);
-
-      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
-      if (C->getZExtValue() == 0) {
-        ImmReg = AMDGPU::ZERO;
-      } else if (C->getZExtValue() == 1) {
-        ImmReg = AMDGPU::ONE_INT;
-      } else {
-        ImmValue = C->getZExtValue();
-      }
-    }
-
-    for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
-                              Use != SDNode::use_end(); Use = Next) {
-      Next = llvm::next(Use);
-      std::vector<SDValue> Ops;
-      for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
-        Ops.push_back(Use->getOperand(i));
-      }
-
-      if (!Use->isMachineOpcode()) {
-          if (ImmReg == AMDGPU::ALU_LITERAL_X) {
-            // We can only use literal constants (e.g. AMDGPU::ZERO,
-            // AMDGPU::ONE, etc) in machine opcodes.
-            continue;
-          }
-      } else {
-        if (!TII->isALUInstr(Use->getMachineOpcode()) ||
-            (TII->get(Use->getMachineOpcode()).TSFlags &
-            R600_InstFlag::VECTOR)) {
-          continue;
-        }
-
-        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
-                                        AMDGPU::OpName::literal);
-        assert(ImmIdx != -1);
-
-        // subtract one from ImmIdx, because the DST operand is usually index
-        // 0 for MachineInstrs, but we have no DST in the Ops vector.
-        ImmIdx--;
-
-        // Check that we aren't already using an immediate.
-        // XXX: It's possible for an instruction to have more than one
-        // immediate operand, but this is not supported yet.
-        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
-          ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
-          assert(C);
-
-          if (C->getZExtValue() != 0) {
-            // This instruction is already using an immediate.
-            continue;
-          }
-
-          // Set the immediate value
-          Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
-        }
-      }
-      // Set the immediate register
-      Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);
-
-      CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
-    }
-    break;
-  }
-  }
-  SDNode *Result = SelectCode(N);
-
-  // Fold operands of selected node
-
-  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
-  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
-    const R600InstrInfo *TII =
-        static_cast<const R600InstrInfo*>(TM.getInstrInfo());
-    if (Result && Result->isMachineOpcode() && Result->getMachineOpcode() == AMDGPU::DOT_4) {
-      bool IsModified = false;
-      do {
-        std::vector<SDValue> Ops;
-        for(SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
-            I != E; ++I)
-          Ops.push_back(*I);
-        IsModified = FoldDotOperands(Result->getMachineOpcode(), TII, Ops);
-        if (IsModified) {
-          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
-        }
-      } while (IsModified);
-
-    }
-    if (Result && Result->isMachineOpcode() &&
-        !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
-        && TII->isALUInstr(Result->getMachineOpcode())) {
-      // Fold FNEG/FABS
-      // TODO: Isel can generate multiple MachineInst, we need to recursively
-      // parse Result
-      bool IsModified = false;
-      do {
-        std::vector<SDValue> Ops;
-        for(SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
-            I != E; ++I)
-          Ops.push_back(*I);
-        IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
-        if (IsModified) {
-          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
-        }
-      } while (IsModified);
-
-      // If node has a single use which is CLAMP_R600, folds it
-      if (Result->hasOneUse() && Result->isMachineOpcode()) {
-        SDNode *PotentialClamp = *Result->use_begin();
-        if (PotentialClamp->isMachineOpcode() &&
-            PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
-          unsigned ClampIdx =
-            TII->getOperandIdx(Result->getMachineOpcode(), AMDGPU::OpName::clamp);
-          std::vector<SDValue> Ops;
-          unsigned NumOp = Result->getNumOperands();
-          for (unsigned i = 0; i < NumOp; ++i) {
-            Ops.push_back(Result->getOperand(i));
-          }
-          Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
-          Result = CurDAG->SelectNodeTo(PotentialClamp,
-              Result->getMachineOpcode(), PotentialClamp->getVTList(),
-              Ops.data(), NumOp);
-        }
-      }
-    }
-  }
-
-  return Result;
-}
-
-bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg,
-                                     SDValue &Abs, const R600InstrInfo *TII) {
-  switch (Src.getOpcode()) {
-  case ISD::FNEG:
-    Src = Src.getOperand(0);
-    Neg = CurDAG->getTargetConstant(1, MVT::i32);
-    return true;
-  case ISD::FABS:
-    if (!Abs.getNode())
-      return false;
-    Src = Src.getOperand(0);
-    Abs = CurDAG->getTargetConstant(1, MVT::i32);
-    return true;
-  case ISD::BITCAST:
-    Src = Src.getOperand(0);
-    return true;
-  default:
-    return false;
-  }
-}
-
-bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
-    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
-  int OperandIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
-  };
-  int SelIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_sel)
-  };
-  int NegIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
-  };
-  int AbsIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
-    -1
-  };
-
-
-  for (unsigned i = 0; i < 3; i++) {
-    if (OperandIdx[i] < 0)
-      return false;
-    SDValue &Src = Ops[OperandIdx[i] - 1];
-    SDValue &Sel = Ops[SelIdx[i] - 1];
-    SDValue &Neg = Ops[NegIdx[i] - 1];
-    SDValue FakeAbs;
-    SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
-    if (FoldOperand(Src, Sel, Neg, Abs, TII))
-      return true;
-  }
-  return false;
-}
-
-bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
-    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
-  int OperandIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
-  };
-  int SelIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_W),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_W)
-  };
-  int NegIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
-  };
-  int AbsIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
-  };
-
-  for (unsigned i = 0; i < 8; i++) {
-    if (OperandIdx[i] < 0)
-      return false;
-    SDValue &Src = Ops[OperandIdx[i] - 1];
-    SDValue &Sel = Ops[SelIdx[i] - 1];
-    SDValue &Neg = Ops[NegIdx[i] - 1];
-    SDValue &Abs = Ops[AbsIdx[i] - 1];
-    if (FoldOperand(Src, Sel, Neg, Abs, TII))
-      return true;
-  }
-  return false;
-}
-
-bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
-  if (!ptr) {
-    return false;
-  }
-  Type *ptrType = ptr->getType();
-  return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
-}
-
-bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
-  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
-  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
-          && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
-          && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
-}
-
-bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
-  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
-  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
-  if (CbId == -1) {
-    return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
-  }
-  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
-}
-
-bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
-  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
-  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isLocalLoad(const  LoadSDNode *N) const {
-  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isRegionLoad(const  LoadSDNode *N) const {
-  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
-  MachineMemOperand *MMO = N->getMemOperand();
-  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
-    if (MMO) {
-      const Value *V = MMO->getValue();
-      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
-      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
-  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
-    // Check to make sure we are not a constant pool load or a constant load
-    // that is marked as a private load
-    if (isCPLoad(N) || isConstantLoad(N, -1)) {
-      return false;
-    }
-  }
-  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
-    return true;
-  }
-  return false;
-}
-
-const char *AMDGPUDAGToDAGISel::getPassName() const {
-  return "AMDGPU DAG->DAG Pattern Instruction Selection";
-}
-
-#ifdef DEBUGTMP
-#undef INT64_C
-#endif
-#undef DEBUGTMP
-
-///==== AMDGPU Functions ====///
-
-bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
-    SDValue& IntPtr) {
-  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
-    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
-    return true;
-  }
-  return false;
-}
-
-bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
-    SDValue& BaseReg, SDValue &Offset) {
-  if (!dyn_cast<ConstantSDNode>(Addr)) {
-    BaseReg = Addr;
-    Offset = CurDAG->getIntPtrConstant(0, true);
-    return true;
-  }
-  return false;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
-                                           SDValue &Offset) {
-  ConstantSDNode * IMMOffset;
-
-  if (Addr.getOpcode() == ISD::ADD
-      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
-      && isInt<16>(IMMOffset->getZExtValue())) {
-
-      Base = Addr.getOperand(0);
-      Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
-      return true;
-  // If the pointer address is constant, we can move it to the offset field.
-  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
-             && isInt<16>(IMMOffset->getZExtValue())) {
-    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
-                                  SDLoc(CurDAG->getEntryNode()),
-                                  AMDGPU::ZERO, MVT::i32);
-    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
-    return true;
-  }
-
-  // Default case, no offset
-  Base = Addr;
-  Offset = CurDAG->getTargetConstant(0, MVT::i32);
-  return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
-                                            SDValue &Offset) {
-  ConstantSDNode *C;
-
-  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
-    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
-    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
-  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
-            (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
-    Base = Addr.getOperand(0);
-    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
-  } else {
-    Base = Addr;
-    Offset = CurDAG->getTargetConstant(0, MVT::i32);
-  }
-
-  return true;
-}
-
-void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
-
-  if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
-    return;
-  }
-
-  // Go over all selected nodes and try to fold them a bit more
-  const AMDGPUTargetLowering& Lowering =
-    (*(const AMDGPUTargetLowering*)getTargetLowering());
-  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
-       E = CurDAG->allnodes_end(); I != E; ++I) {
-
-    SDNode *Node = I;
-    switch (Node->getOpcode()) {
-    // Fix the register class in copy to CopyToReg nodes - ISel will always
-    // use SReg classes for 64-bit copies, but this is not always what we want.
-    case ISD::CopyToReg: {
-      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
-      SDValue Val = Node->getOperand(2);
-      const TargetRegisterClass *RC = RegInfo->getRegClass(Reg);
-      if (RC != &AMDGPU::SReg_64RegClass) {
-        continue;
-      }
-
-      if (!Val.getNode()->isMachineOpcode() ||
-          Val.getNode()->getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
-        continue;
-      }
-
-      const MCInstrDesc Desc = TM.getInstrInfo()->get(Val.getNode()->getMachineOpcode());
-      const TargetRegisterInfo *TRI = TM.getRegisterInfo();
-      RegInfo->setRegClass(Reg, TRI->getRegClass(Desc.OpInfo[0].RegClass));
-      continue;
-    }
-    }
-
-    MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
-    if (!MachineNode)
-      continue;
-
-    SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
-    if (ResNode != Node) {
-      ReplaceUses(Node, ResNode);
-    }
-  }
-}
diff --git a/lib/Target/R600/CMakeLists.txt b/lib/Target/R600/CMakeLists.txt
index 824475e..4f8665b 100644
--- a/lib/Target/R600/CMakeLists.txt
+++ b/lib/Target/R600/CMakeLists.txt
@@ -14,11 +14,11 @@ add_public_tablegen_target(AMDGPUCommonTableGen)
 add_llvm_target(R600CodeGen
   AMDILCFGStructurizer.cpp
   AMDILIntrinsicInfo.cpp
-  AMDILISelDAGToDAG.cpp
   AMDILISelLowering.cpp
   AMDGPUAsmPrinter.cpp
   AMDGPUFrameLowering.cpp
   AMDGPUIndirectAddressing.cpp
+  AMDGPUISelDAGToDAG.cpp
   AMDGPUMCInstLower.cpp
   AMDGPUMachineFunction.cpp
   AMDGPUSubtarget.cpp
-- 
1.7.11.4
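
A note on SelectADDRVTX_READ, which moves verbatim into
AMDGPUISelDAGToDAG.cpp above: it folds a (base + small constant) pointer
into the VTX_READ's 16-bit immediate offset field, and an all-constant
pointer into a read relative to the ZERO register. A minimal scalar
sketch of that decision; the names here are illustrative, not LLVM APIs:

    #include <cstdint>

    struct VtxAddress { int64_t Base; int64_t Offset; bool BaseIsZeroReg; };

    static bool fitsInImm16(int64_t V) { return V >= -32768 && V <= 32767; }

    // Case 1: (add base, const) -> fold const into the 16-bit offset field.
    // Case 2: all-constant pointer -> read relative to the ZERO register.
    // Case 3: anything else -> plain base address with offset 0.
    VtxAddress splitVtxAddress(bool IsAdd, int64_t Base, int64_t Imm,
                               bool HasConstImm) {
      if (IsAdd && HasConstImm && fitsInImm16(Imm))
        return {Base, Imm, false};
      if (!IsAdd && HasConstImm && fitsInImm16(Imm))
        return {0, Imm, true};
      return {Base, 0, false};
    }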

-------------- next part --------------
>From 83e85bc1f89bc431dcb6581efaeabd68f799850b Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Mon, 24 Jun 2013 10:44:05 -0700
Subject: [PATCH 08/10] R600: Improve support for < 32-bit loads

---
 lib/Target/R600/AMDGPUInstructions.td | 20 ++++++++++++++--
 lib/Target/R600/R600Instructions.td   |  8 +++++++
 lib/Target/R600/SIISelLowering.cpp    |  4 ++--
 lib/Target/R600/SIInstructions.td     | 16 +++++++++----
 test/CodeGen/R600/load.ll             | 45 +++++++++++++++++++++++++++++++++++
 test/CodeGen/R600/short-args.ll       | 36 +++++++++++++++++-----------
 6 files changed, 106 insertions(+), 23 deletions(-)
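
A note on the approach: SI gets real sub-dword loads in this patch
(BUFFER_LOAD_UBYTE/SBYTE and BUFFER_LOAD_USHORT/SSHORT), while R600 has
no sign-extending VTX read, so SEXTLOAD is expanded into the
zero-extending read followed by a shift pair; that is what the LSHL/ASHR
lines added to load.ll check for. The scalar equivalent of that expansion
is sketched below (not code from the patch; it assumes the host's signed
right shift is arithmetic, which holds on all supported hosts):

    #include <cstdint>

    // Recreate the sign of the low 'Bits' bits of a zero-extended load:
    // shift left by (32 - Bits), then arithmetic-shift right by the same.
    int32_t signExtendLowBits(uint32_t ZExtLoaded, unsigned Bits) {
      unsigned Shift = 32 - Bits;  // 24 for an i8 load, 16 for an i16 load
      return (int32_t)(ZExtLoaded << Shift) >> Shift;
    }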

diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td
index eadb368..56f28e9 100644
--- a/lib/Target/R600/AMDGPUInstructions.td
+++ b/lib/Target/R600/AMDGPUInstructions.td
@@ -100,10 +100,18 @@ def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
     return isGlobalLoad(dyn_cast<LoadSDNode>(N));
 }]>;
 
-def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
+def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
     return isGlobalLoad(dyn_cast<LoadSDNode>(N));
 }]>;
 
+def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
+    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
+    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
 def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
   return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
 }]>;
@@ -112,10 +120,18 @@ def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [
     return isGlobalLoad(dyn_cast<LoadSDNode>(N));
 }]>;
 
-def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
+def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
     return isGlobalLoad(dyn_cast<LoadSDNode>(N));
 }]>;
 
+def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
+    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
+def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
+    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
+}]>;
+
 class Constants {
 int TWO_PI = 0x40c90fdb;
 int PI = 0x40490fdb;
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index 31f98a8..2b5b37b 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -1410,6 +1410,10 @@ def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1,
   [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
 >;
 
+def VTX_READ_GLOBAL_16_eg : VTX_READ_16_eg <1,
+  [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
+>;
+
 // 32-bit reads
 def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1,
   [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
@@ -1753,6 +1757,10 @@ def VTX_READ_GLOBAL_8_cm : VTX_READ_8_cm <1,
   [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
 >;
 
+def VTX_READ_GLOBAL_16_cm : VTX_READ_16_cm <1,
+  [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
+>;
+
 // 32-bit reads
 def VTX_READ_GLOBAL_32_cm : VTX_READ_32_cm <1,
   [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index 9d4cfef..bee97f1 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -105,9 +105,9 @@ SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT,
                            MRI.getLiveInVirtReg(AMDGPU::SGPR0_SGPR1), MVT::i64);
   SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
                                              DAG.getConstant(Offset, MVT::i64));
-  return DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, Chain, Ptr,
+  return DAG.getLoad(VT, DL, Chain, Ptr,
                             MachinePointerInfo(UndefValue::get(PtrTy)),
-                            VT, false, false, ArgVT.getSizeInBits() >> 3);
+                            false, false, false, ArgVT.getSizeInBits() >> 3);
 
 }
 
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 1124222..9086628 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -400,9 +400,9 @@ defm BUFFER_LOAD_FORMAT_XYZW : MUBUF_Load_Helper <0x00000003, "BUFFER_LOAD_FORMA
 //def BUFFER_STORE_FORMAT_XYZ : MUBUF_ <0x00000006, "BUFFER_STORE_FORMAT_XYZ", []>;
 //def BUFFER_STORE_FORMAT_XYZW : MUBUF_ <0x00000007, "BUFFER_STORE_FORMAT_XYZW", []>;
 defm BUFFER_LOAD_UBYTE : MUBUF_Load_Helper <0x00000008, "BUFFER_LOAD_UBYTE", VReg_32>;
-//def BUFFER_LOAD_SBYTE : MUBUF_ <0x00000009, "BUFFER_LOAD_SBYTE", []>;
-//def BUFFER_LOAD_USHORT : MUBUF_ <0x0000000a, "BUFFER_LOAD_USHORT", []>;
-//def BUFFER_LOAD_SSHORT : MUBUF_ <0x0000000b, "BUFFER_LOAD_SSHORT", []>;
+defm BUFFER_LOAD_SBYTE : MUBUF_Load_Helper <0x00000009, "BUFFER_LOAD_SBYTE", VReg_32>;
+defm BUFFER_LOAD_USHORT : MUBUF_Load_Helper <0x0000000a, "BUFFER_LOAD_USHORT", VReg_32>;
+defm BUFFER_LOAD_SSHORT : MUBUF_Load_Helper <0x0000000b, "BUFFER_LOAD_SSHORT", VReg_32>;
 defm BUFFER_LOAD_DWORD : MUBUF_Load_Helper <0x0000000c, "BUFFER_LOAD_DWORD", VReg_32>;
 defm BUFFER_LOAD_DWORDX2 : MUBUF_Load_Helper <0x0000000d, "BUFFER_LOAD_DWORDX2", VReg_64>;
 defm BUFFER_LOAD_DWORDX4 : MUBUF_Load_Helper <0x0000000e, "BUFFER_LOAD_DWORDX4", VReg_128>;
@@ -1634,10 +1634,16 @@ multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
   >;
 }
 
-defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32,
-                          global_load, constant_load>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32,
+                          sextloadi8_global, sextloadi8_constant>;
 defm : MUBUFLoad_Pattern <BUFFER_LOAD_UBYTE_ADDR64, i32,
                           az_extloadi8_global, az_extloadi8_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_SSHORT_ADDR64, i32,
+                          sextloadi16_global, sextloadi16_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_USHORT_ADDR64, i32,
+                          az_extloadi16_global, az_extloadi16_constant>;
+defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORD_ADDR64, i32,
+                          global_load, constant_load>;
 defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX2_ADDR64, v2i32,
                           global_load, constant_load>;
 defm : MUBUFLoad_Pattern <BUFFER_LOAD_DWORDX4_ADDR64, v4i32,
diff --git a/test/CodeGen/R600/load.ll b/test/CodeGen/R600/load.ll
index d1ebaa3..ebca580 100644
--- a/test/CodeGen/R600/load.ll
+++ b/test/CodeGen/R600/load.ll
@@ -15,6 +15,51 @@ define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
   ret void
 }
 
+; R600-CHECK: @load_i8_sext
+; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
+; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
+; R600-CHECK: 24
+; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
+; R600-CHECK: 24
+; SI-CHECK: @load_i8_sext
+; SI-CHECK: BUFFER_LOAD_SBYTE
+define void @load_i8_sext(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
+entry:
+  %0 = load i8 addrspace(1)* %in
+  %1 = sext i8 %0 to i32
+  store i32 %1, i32 addrspace(1)* %out
+  ret void
+}
+
+; Load an i16 value from the global address space.
+; R600-CHECK: @load_i16
+; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+; SI-CHECK: @load_i16
+; SI-CHECK: BUFFER_LOAD_USHORT
+define void @load_i16(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
+entry:
+  %0 = load i16 addrspace(1)* %in
+  %1 = zext i16 %0 to i32
+  store i32 %1, i32 addrspace(1)* %out
+  ret void
+}
+
+; R600-CHECK: @load_i16_sext
+; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
+; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
+; R600-CHECK: 16
+; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
+; R600-CHECK: 16
+; SI-CHECK: @load_i16_sext
+; SI-CHECK: BUFFER_LOAD_SSHORT
+define void @load_i16_sext(i32 addrspace(1)* %out, i16 addrspace(1)* %in) {
+entry:
+  %0 = load i16 addrspace(1)* %in
+  %1 = sext i16 %0 to i32
+  store i32 %1, i32 addrspace(1)* %out
+  ret void
+}
+
 ; load an i32 value from the global address space.
 ; R600-CHECK: @load_i32
 ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
diff --git a/test/CodeGen/R600/short-args.ll b/test/CodeGen/R600/short-args.ll
index 69a8412..20d0ae4 100644
--- a/test/CodeGen/R600/short-args.ll
+++ b/test/CodeGen/R600/short-args.ll
@@ -1,8 +1,10 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
 
-; CHECK: @i8_arg
-; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; EG-CHECK: @i8_arg
+; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; SI-CHECK: BUFFER_LOAD_UBYTE
 
 define void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) nounwind {
 entry:
@@ -11,8 +13,9 @@ entry:
   ret void
 }
 
-; CHECK: @i8_zext_arg
-; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; EG-CHECK: @i8_zext_arg
+; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
 
 define void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
 entry:
@@ -21,8 +24,10 @@ entry:
   ret void
 }
 
-; CHECK: @i8_sext_arg
-; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; EG-CHECK: @i8_sext_arg
+; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
+
 define void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
 entry:
   %0 = sext i8 %in to i32
@@ -30,8 +35,9 @@ entry:
   ret void
 }
 
-; CHECK: @i16_arg
-; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; EG-CHECK: @i16_arg
+; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; SI-CHECK: BUFFER_LOAD_USHORT
 
 define void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) nounwind {
 entry:
@@ -40,8 +46,9 @@ entry:
   ret void
 }
 
-; CHECK: @i16_zext_arg
-; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; EG-CHECK: @i16_zext_arg
+; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
 
 define void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
 entry:
@@ -50,8 +57,9 @@ entry:
   ret void
 }
 
-; CHECK: @i16_sext_arg
-; CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; EG-CHECK: @i16_sext_arg
+; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
+; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
 
 define void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
 entry:
-- 
1.7.11.4

-------------- next part --------------
>From d4eb3d2447d052403e98aa5cb4b24579524046b5 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Tue, 18 Jun 2013 08:21:33 -0700
Subject: [PATCH 09/10] R600: Add support for 24-bit MUL instructions

---
 lib/Target/R600/AMDGPUISelDAGToDAG.cpp | 50 +++++++++++++++++++++++++-
 lib/Target/R600/AMDGPUInstructions.td  | 13 +++++++
 lib/Target/R600/R600Instructions.td    |  7 ++++
 lib/Target/R600/SIInstructions.td      | 10 +++---
 test/CodeGen/R600/mul_int24.ll         | 19 ++++++++++
 test/CodeGen/R600/mul_uint24.ll        | 65 ++++++++++++++++++++++++++++++++++
 6 files changed, 159 insertions(+), 5 deletions(-)
 create mode 100644 test/CodeGen/R600/mul_int24.ll
 create mode 100644 test/CodeGen/R600/mul_uint24.ll
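
For review, the operand tests in SelectI24/SelectU24 restated on plain
integers (the patch derives them from ComputeNumSignBits and
ComputeMaskedBits on the SDValue; this is a sketch, not patch code):

    #include <cstdint>

    bool fitsU24(uint32_t V) { return (V >> 24) == 0; }  // top 8 bits zero
    bool fitsI24(int32_t V) {                            // >= 9 sign bits
      return V >= -(1 << 23) && V < (1 << 23);
    }

When both multiply operands pass, the low 32 bits of the 24-bit multiply
equal the low 32 bits of a full 32-bit multiply, so MUL_UINT24 and
MUL_INT24 (V_MUL_U32_U24 and V_MUL_I32_I24 on SI) can stand in for
MULLO_INT.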

diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index fed044a..f90b8d8 100644
--- a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -58,6 +58,9 @@ private:
   bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
   bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
   bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
+  SDValue SimplifyI24(SDValue &Op);
+  bool SelectI24(SDValue Op, SDValue &I24);
+  bool SelectU24(SDValue Op, SDValue &U24);
 
   static bool checkType(const Value *ptr, unsigned int addrspace);
 
@@ -669,7 +672,9 @@ const char *AMDGPUDAGToDAGISel::getPassName() const {
 #endif
 #undef DEBUGTMP
 
-///==== AMDGPU Functions ====///
+//===----------------------------------------------------------------------===//
+// Complex Patterns
+//===----------------------------------------------------------------------===//
 
 bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
     SDValue& IntPtr) {
@@ -736,6 +741,49 @@ bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
   return true;
 }
 
+SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
+  APInt Demanded = APInt(32, 0x00FFFFFF);
+  APInt KnownZero, KnownOne;
+  TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
+  const TargetLowering *TLI = getTargetLowering();
+  if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
+    CurDAG->ReplaceAllUsesWith(Op, TLO.New);
+    CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
+    return SimplifyI24(TLO.New);
+  } else {
+    return Op;
+  }
+}
+
+bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {
+
+  assert(Op.getValueType() == MVT::i32);
+
+  if (CurDAG->ComputeNumSignBits(Op) == 9) { // value fits in 24 signed bits
+    I24 = SimplifyI24(Op);
+    return true;
+  }
+  return false;
+}
+
+bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
+  APInt KnownZero;
+  APInt KnownOne;
+  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);
+
+  assert(Op.getValueType() == MVT::i32);
+
+  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
+  // i32.  These smaller types are legal to use with the i24 instructions.
+  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
+       Op.getOpcode() == ISD::ANY_EXTEND ||
+       ISD::isEXTLoad(Op.getNode())) {
+    U24 = SimplifyI24(Op);
+    return true;
+  }
+  return false;
+}
+
 void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
 
   if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/R600/AMDGPUInstructions.td
index 56f28e9..3202d08 100644
--- a/lib/Target/R600/AMDGPUInstructions.td
+++ b/lib/Target/R600/AMDGPUInstructions.td
@@ -150,6 +150,9 @@ def FP_ONE : PatLeaf <
   [{return N->isExactlyValue(1.0);}]
 >;
 
+def U24 : ComplexPattern<i32, 1, "SelectU24", [], []>;
+def I24 : ComplexPattern<i32, 1, "SelectI24", [], []>;
+
 let isCodeGenOnly = 1, isPseudo = 1 in {
 
 let usesCustomInserter = 1  in {
@@ -343,6 +346,16 @@ class ROTRPattern <Instruction BIT_ALIGN> : Pat <
   (BIT_ALIGN $src0, $src0, $src1)
 >;
 
+// 24-bit arithmetic patterns
+def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;
+
+/*
+class UMUL24Pattern <Instruction UMUL24> : Pat <
+  (mul U24:$x, U24:$y),
+  (UMUL24 $x, $y)
+>;
+*/
+
 include "R600Instructions.td"
 
 include "SIInstrInfo.td"
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index 2b5b37b..33ef772 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -1479,6 +1479,9 @@ let Predicates = [isEGorCayman] in {
   def CNDGE_eg : CNDGE_Common<0x1B>;
   def MUL_LIT_eg : MUL_LIT_Common<0x1F>;
   def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>;
+  def MUL_UINT24_eg : R600_2OP <0xB5, "MUL_UINT24",
+    [(set i32:$dst, (mul U24:$src0, U24:$src1))], VecALU
+  >;
   def DOT4_eg : DOT4_Common<0xBE>;
   defm CUBE_eg : CUBE_Common<0xC0>;
 
@@ -1602,6 +1605,10 @@ defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>;
 
 let Predicates = [isCayman] in {
 
+def MUL_INT24_cm : R600_2OP <0x5B, "MUL_INT24",
+  [(set i32:$dst, (mul I24:$src0, I24:$src1))], VecALU
+>;
+
 let isVector = 1 in {
 
 def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 9086628..3d81372 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -836,14 +836,16 @@ defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32",
   [(set f32:$dst, (fmul f32:$src0, f32:$src1))]
 >;
 
-} // End isCommutable = 1
 
-//defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24", []>;
+defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24",
+  [(set i32:$dst, (mul I24:$src0, I24:$src1))]
+>;
 //defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "V_MUL_HI_I32_I24", []>;
-//defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24", []>;
+defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24",
+  [(set i32:$dst, (mul U24:$src0, U24:$src1))]
+>;
 //defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "V_MUL_HI_U32_U24", []>;
 
-let isCommutable = 1 in {
 
 defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32",
   [(set f32:$dst, (AMDGPUfmin f32:$src0, f32:$src1))]
diff --git a/test/CodeGen/R600/mul_int24.ll b/test/CodeGen/R600/mul_int24.ll
new file mode 100644
index 0000000..a0cdb6f
--- /dev/null
+++ b/test/CodeGen/R600/mul_int24.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
+
+; EG-CHECK: @i32_mul24
+; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
+; EG-CHECK: MULLO_INT
+; CM-CHECK: MUL_INT24 {{[ *]*}}T{{[0-9].[XYZW]}}, KC0[2].Z, KC0[2].W
+; SI-CHECK: V_MUL_I32_I24
+define void @i32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+entry:
+  %0 = shl i32 %a, 8
+  %a_24 = ashr i32 %0, 8
+  %1 = shl i32 %b, 8
+  %b_24 = ashr i32 %1, 8
+  %2 = mul i32 %a_24, %b_24
+  store i32 %2, i32 addrspace(1)* %out
+  ret void
+}
diff --git a/test/CodeGen/R600/mul_uint24.ll b/test/CodeGen/R600/mul_uint24.ll
new file mode 100644
index 0000000..b1a7f94
--- /dev/null
+++ b/test/CodeGen/R600/mul_uint24.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
+
+; EG-CHECK: @u32_mul24
+; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
+; SI-CHECK: @u32_mul24
+; SI-CHECK: V_MUL_U32_U24
+
+define void @u32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+entry:
+  %0 = shl i32 %a, 8
+  %a_24 = lshr i32 %0, 8
+  %1 = shl i32 %b, 8
+  %b_24 = lshr i32 %1, 8
+  %2 = mul i32 %a_24, %b_24
+  store i32 %2, i32 addrspace(1)* %out
+  ret void
+}
+
+; EG-CHECK: @i16_mul24
+; EG-CHECK-DAG: VTX_READ_16 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
+; EG-CHECK-DAG: VTX_READ_16 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
+; The order of A and B does not matter.
+; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]], [[A]], [[B]]
+; The result must be sign-extended
+; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MUL_CHAN]], literal.x
+; EG-CHECK: 16
+; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
+; EG-CHECK: 16
+; SI-CHECK: @i16_mul24
+; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
+; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 16, [[MUL]]
+; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 16, [[LSHL]]
+
+define void @i16_mul24(i32 addrspace(1)* %out, i16 %a, i16 %b) {
+entry:
+  %0 = mul i16 %a, %b
+  %1 = sext i16 %0 to i32
+  store i32 %1, i32 addrspace(1)* %out
+  ret void
+}
+
+; EG-CHECK: @i8_mul24
+; EG-CHECK-DAG: VTX_READ_8 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
+; EG-CHECK-DAG: VTX_READ_8 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
+; The order of A and B does not matter.
+; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]], [[A]], [[B]]
+; The result must be sign-extended
+; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MUL_CHAN]], literal.x
+; EG-CHECK: 24
+; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
+; EG-CHECK: 24
+; SI-CHECK: @i8_mul24
+; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
+; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 24, [[MUL]]
+; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 24, [[LSHL]]
+
+define void @i8_mul24(i32 addrspace(1)* %out, i8 %a, i8 %b) {
+entry:
+  %0 = mul i8 %a, %b
+  %1 = sext i8 %0 to i32
+  store i32 %1, i32 addrspace(1)* %out
+  ret void
+}
-- 
1.7.11.4

-------------- next part --------------
>From c03cb829ef21f8c867dc2f60b769bf84b378ca45 Mon Sep 17 00:00:00 2001
From: Tom Stellard <thomas.stellard at amd.com>
Date: Mon, 24 Jun 2013 11:27:19 -0700
Subject: [PATCH 10/10] R600: Add support for 24-bit MAD instructions

---
 lib/Target/R600/R600Instructions.td |  6 ++++
 lib/Target/R600/SIInstructions.td   |  8 +++--
 test/CodeGen/R600/mad_int24.ll      | 20 +++++++++++
 test/CodeGen/R600/mad_uint24.ll     | 70 +++++++++++++++++++++++++++++++++++++
 4 files changed, 102 insertions(+), 2 deletions(-)
 create mode 100644 test/CodeGen/R600/mad_int24.ll
 create mode 100644 test/CodeGen/R600/mad_uint24.ll
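
Note that the MAD patterns only constrain the multiply operands; the
addend is matched as a plain i32:$src2. A scalar model of the unsigned
case follows, with a hypothetical name (madU24 is not part of the patch):

    #include <cstdint>

    // (add (mul U24:a, U24:b), c): the low 32 bits of the 24x24-bit
    // product, plus a full 32-bit addend.
    uint32_t madU24(uint32_t A, uint32_t B, uint32_t C) {
      return (A & 0xFFFFFF) * (B & 0xFFFFFF) + C;
    }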

diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index 33ef772..0c8fe10 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -1466,6 +1466,9 @@ let Predicates = [isEGorCayman] in {
   def BFI_INT_eg : R600_3OP <0x06, "BFI_INT", [], VecALU>;
   defm : BFIPatterns <BFI_INT_eg>;
 
+  def MULADD_UINT24_eg : R600_3OP <0x10, "MULADD_UINT24",
+    [(set i32:$dst, (add (mul U24:$src0, U24:$src1), i32:$src2))], VecALU
+  >;
   def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", [], VecALU>;
   def : ROTRPattern <BIT_ALIGN_INT_eg>;
 
@@ -1605,6 +1608,9 @@ defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>;
 
 let Predicates = [isCayman] in {
 
+def MULADD_INT24_cm : R600_3OP <0x08, "MULADD_INT24",
+  [(set i32:$dst, (add (mul I24:$src0, I24:$src1), i32:$src2))], VecALU
+>;
 def MUL_INT24_cm : R600_2OP <0x5B, "MUL_INT24",
   [(set i32:$dst, (mul I24:$src0, I24:$src1))], VecALU
 >;
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/R600/SIInstructions.td
index 3d81372..c4340ef 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/R600/SIInstructions.td
@@ -953,8 +953,12 @@ let neverHasSideEffects = 1 in {
 
 def V_MAD_LEGACY_F32 : VOP3_32 <0x00000140, "V_MAD_LEGACY_F32", []>;
 def V_MAD_F32 : VOP3_32 <0x00000141, "V_MAD_F32", []>;
-//def V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24", []>;
-//def V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24", []>;
+def V_MAD_I32_I24 : VOP3_32 <0x00000142, "V_MAD_I32_I24",
+  [(set i32:$dst, (add (mul I24:$src0, I24:$src1), i32:$src2))]
+>;
+def V_MAD_U32_U24 : VOP3_32 <0x00000143, "V_MAD_U32_U24",
+  [(set i32:$dst, (add (mul U24:$src0, U24:$src1), i32:$src2))]
+>;
 
 } // End neverHasSideEffects
 def V_CUBEID_F32 : VOP3_32 <0x00000144, "V_CUBEID_F32", []>;
diff --git a/test/CodeGen/R600/mad_int24.ll b/test/CodeGen/R600/mad_int24.ll
new file mode 100644
index 0000000..ce42ae7
--- /dev/null
+++ b/test/CodeGen/R600/mad_int24.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
+
+; EG-CHECK: @i32_mad24
+; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
+; EG-CHECK: MULLO_INT
+; CM-CHECK: MULADD_INT24 {{[ *]*}}T{{[0-9].[XYZW]}}, KC0[2].Z, KC0[2].W, KC0[3].X
+; SI-CHECK: V_MAD_I32_I24
+define void @i32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+entry:
+  %0 = shl i32 %a, 8
+  %a_24 = ashr i32 %0, 8
+  %1 = shl i32 %b, 8
+  %b_24 = ashr i32 %1, 8
+  %2 = mul i32 %a_24, %b_24
+  %3 = add i32 %2, %c
+  store i32 %3, i32 addrspace(1)* %out
+  ret void
+}
diff --git a/test/CodeGen/R600/mad_uint24.ll b/test/CodeGen/R600/mad_uint24.ll
new file mode 100644
index 0000000..00aa64a
--- /dev/null
+++ b/test/CodeGen/R600/mad_uint24.ll
@@ -0,0 +1,70 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
+; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
+
+; EG-CHECK: @u32_mad24
+; EG-CHECK: MULADD_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W, KC0[3].X
+; SI-CHECK: @u32_mad24
+; SI-CHECK: V_MAD_U32_U24
+
+define void @u32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
+entry:
+  %0 = shl i32 %a, 8
+  %a_24 = lshr i32 %0, 8
+  %1 = shl i32 %b, 8
+  %b_24 = lshr i32 %1, 8
+  %2 = mul i32 %a_24, %b_24
+  %3 = add i32 %2, %c
+  store i32 %3, i32 addrspace(1)* %out
+  ret void
+}
+
+; EG-CHECK: @i16_mad24
+; EG-CHECK-DAG: VTX_READ_16 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
+; EG-CHECK-DAG: VTX_READ_16 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
+; EG-CHECK-DAG: VTX_READ_16 [[C:T[0-9]\.X]], T{{[0-9]}}.X, 48
+; The order of A and B does not matter.
+; EG-CHECK: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]], [[A]], [[B]], [[C]]
+; The result must be sign-extended
+; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MAD_CHAN]], literal.x
+; EG-CHECK: 16
+; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
+; EG-CHECK: 16
+; SI-CHECK: @i16_mad24
+; SI-CHECK: V_MAD_U32_U24 [[MAD:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
+; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 16, [[MAD]]
+; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 16, [[LSHL]]
+
+define void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
+entry:
+  %0 = mul i16 %a, %b
+  %1 = add i16 %0, %c
+  %2 = sext i16 %1 to i32
+  store i32 %2, i32 addrspace(1)* %out
+  ret void
+}
+
+; EG-CHECK: @i8_mad24
+; EG-CHECK-DAG: VTX_READ_8 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
+; EG-CHECK-DAG: VTX_READ_8 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
+; EG-CHECK-DAG: VTX_READ_8 [[C:T[0-9]\.X]], T{{[0-9]}}.X, 48
+; The order of A and B does not matter.
+; EG-CHECK: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]], [[A]], [[B]], [[C]]
+; The result must be sign-extended
+; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MAD_CHAN]], literal.x
+; EG-CHECK: 24
+; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
+; EG-CHECK: 24
+; SI-CHECK: @i8_mad24
+; SI-CHECK: V_MAD_U32_U24 [[MAD:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
+; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 24, [[MAD]]
+; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 24, [[LSHL]]
+
+define void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
+entry:
+  %0 = mul i8 %a, %b
+  %1 = add i8 %0, %c
+  %2 = sext i8 %1 to i32
+  store i32 %2, i32 addrspace(1)* %out
+  ret void
+}
-- 
1.7.11.4


