[PATCH] R600/SI: Change formatting of printed registers.

Tom Stellard tom at stellard.net
Mon Nov 11 15:29:00 PST 2013


On Fri, Nov 08, 2013 at 05:38:26PM -0800, Matt Arsenault wrote:
> Print the range of registers used with a single letter prefix. 
> This better matches what the shader compiler produces and 
> is overall less obnoxious than concatenating all of the 
> subregister names together.
>     
> Instead of SGPR0, it will print s0. Instead of SGPR0_SGPR1, 
> it will print s[0:1] and so on.
>     
> There doesn't appear to be a straightforward way
> to get the actual register info in the InstPrinter,
> so this parses the generated name to print with the
> new syntax.
> 
> The required test changes are pretty nasty, and register 
> matching regexes are now worse. Since there isn't a way to 
> add to a variable in FileCheck, some of the tests now don't 
> check the exact number of registers used, but I don't think that 
> will be a real problem.
> 

LGTM.
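
For anyone skimming the summary rather than the whole diff: the renaming just
strips the generated "SGPR"/"VGPR" prefix, keeps the first index, and counts
the '_'-joined pieces to get the upper bound of the range. A rough standalone
sketch of that idea (a hypothetical prettyRegName() helper using plain
std::string instead of StringRef, and omitting the VCC/EXEC/M0/SCC special
cases and the graceful fallback when the index fails to parse; illustrative
only, not the patch itself):

  #include <cstdio>
  #include <string>

  // Hypothetical helper: "SGPR0" -> "s0", "SGPR0_SGPR1" -> "s[0:1]",
  // "VGPR4_VGPR5_VGPR6_VGPR7" -> "v[4:7]". Unrecognized names pass through.
  static std::string prettyRegName(const std::string &Name) {
    std::size_t Split = Name.find('_');
    std::string First = Name.substr(0, Split);      // e.g. "SGPR0"
    if (First.size() <= 4)                          // too short to carry an index
      return Name;
    char Prefix = First[0] == 'V' ? 'v' : First[0] == 'S' ? 's' : '\0';
    if (!Prefix)
      return Name;
    unsigned Index = std::stoul(First.substr(4));   // index of the first piece
    if (Split == std::string::npos)                 // single 32-bit register
      return Prefix + std::to_string(Index);
    unsigned NumRegs = 2;                           // count the remaining '_' joins
    for (std::size_t I = Split + 1; I < Name.size(); ++I)
      if (Name[I] == '_')
        ++NumRegs;
    return Prefix + ("[" + std::to_string(Index) + ":" +
                     std::to_string(Index + NumRegs - 1) + "]");
  }

  int main() {
    std::printf("%s\n", prettyRegName("SGPR0").c_str());                    // s0
    std::printf("%s\n", prettyRegName("SGPR0_SGPR1").c_str());              // s[0:1]
    std::printf("%s\n", prettyRegName("VGPR4_VGPR5_VGPR6_VGPR7").c_str());  // v[4:7]
    return 0;
  }

The version in the patch does the same thing with StringRef::split and
getAsInteger, so it never throws and simply falls back to the raw name.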

> 
> http://llvm-reviews.chandlerc.com/D2130
> 
> Files:
>   lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
>   lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
>   test/CodeGen/R600/32-bit-local-address-space.ll
>   test/CodeGen/R600/64bit-kernel-args.ll
>   test/CodeGen/R600/add.ll
>   test/CodeGen/R600/address-space.ll
>   test/CodeGen/R600/and.ll
>   test/CodeGen/R600/bfi_int.ll
>   test/CodeGen/R600/build_vector.ll
>   test/CodeGen/R600/fabs.ll
>   test/CodeGen/R600/fadd64.ll
>   test/CodeGen/R600/fcmp64.ll
>   test/CodeGen/R600/fconst64.ll
>   test/CodeGen/R600/fdiv64.ll
>   test/CodeGen/R600/fma.ll
>   test/CodeGen/R600/fmul64.ll
>   test/CodeGen/R600/fmuladd.ll
>   test/CodeGen/R600/fneg.ll
>   test/CodeGen/R600/fsqrt.ll
>   test/CodeGen/R600/fsub64.ll
>   test/CodeGen/R600/imm.ll
>   test/CodeGen/R600/indirect-addressing-si.ll
>   test/CodeGen/R600/kernel-args.ll
>   test/CodeGen/R600/llvm.SI.imageload.ll
>   test/CodeGen/R600/llvm.SI.resinfo.ll
>   test/CodeGen/R600/llvm.SI.sample-masked.ll
>   test/CodeGen/R600/llvm.SI.sample.ll
>   test/CodeGen/R600/llvm.SI.sampled.ll
>   test/CodeGen/R600/llvm.SI.tbuffer.store.ll
>   test/CodeGen/R600/load.ll
>   test/CodeGen/R600/load.vec.ll
>   test/CodeGen/R600/load64.ll
>   test/CodeGen/R600/local-memory-two-objects.ll
>   test/CodeGen/R600/local-memory.ll
>   test/CodeGen/R600/lshl.ll
>   test/CodeGen/R600/lshr.ll
>   test/CodeGen/R600/mad_uint24.ll
>   test/CodeGen/R600/mul.ll
>   test/CodeGen/R600/mul_uint24.ll
>   test/CodeGen/R600/mulhu.ll
>   test/CodeGen/R600/or.ll
>   test/CodeGen/R600/rotr.ll
>   test/CodeGen/R600/seto.ll
>   test/CodeGen/R600/setuo.ll
>   test/CodeGen/R600/sgpr-copy.ll
>   test/CodeGen/R600/shl.ll
>   test/CodeGen/R600/si-lod-bias.ll
>   test/CodeGen/R600/sra.ll
>   test/CodeGen/R600/srl.ll
>   test/CodeGen/R600/sub.ll
>   test/CodeGen/R600/trunc.ll
>   test/CodeGen/R600/unaligned-load-store.ll
>   test/CodeGen/R600/work-item-intrinsics.ll
>   test/CodeGen/R600/xor.ll
>   test/CodeGen/R600/zero_extend.ll

> Index: lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
> ===================================================================
> --- lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
> +++ lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
> @@ -23,6 +23,67 @@
>    printAnnotation(OS, Annot);
>  }
>  
> +void AMDGPUInstPrinter::printRegOperand(unsigned reg, raw_ostream &O) {
> +  switch (reg) {
> +  case AMDGPU::VCC:
> +    O << "vcc";
> +    return;
> +
> +  case AMDGPU::SCC:
> +    O << "scc";
> +    return;
> +
> +  case AMDGPU::EXEC:
> +    O << "exec";
> +    return;
> +
> +  case AMDGPU::M0:
> +    O << "m0";
> +    return;
> +
> +  default:
> +    break;
> +  }
> +
> +  // It seems there's no way to use SIRegisterInfo here, and dealing with the
> +  // giant enum of all the different shifted sets of registers is pretty
> +  // unmanageable, so parse the name and reformat it to be prettier.
> +  StringRef Name(getRegisterName(reg));
> +
> +  std::pair<StringRef, StringRef> Split = Name.split('_');
> +  StringRef SubRegName = Split.first;
> +  StringRef Rest = Split.second;
> +
> +  if (SubRegName.size() <= 4) { // Must at least be as long as "SGPR"/"VGPR".
> +    O << Name;
> +    return;
> +  }
> +
> +  unsigned RegIndex;
> +  StringRef RegIndexStr = SubRegName.drop_front(4);
> +
> +  if (RegIndexStr.getAsInteger(10, RegIndex)) {
> +    O << Name;
> +    return;
> +  }
> +
> +  if (SubRegName.front() == 'V') {
> +    O << 'v';
> +  } else if (SubRegName.front() == 'S') {
> +    O << 's';
> +  } else {
> +    O << Name;
> +    return;
> +  }
> +
> +  if (Rest.empty()) // Only 1 32-bit register
> +    O << RegIndex;
> +  else {
> +    unsigned NumReg = Rest.count('_') + 2;
> +    O << '[' << RegIndex << ':' << (RegIndex + NumReg - 1) << ']';
> +  }
> +}
> +
>  void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
>                                       raw_ostream &O) {
>  
> @@ -30,8 +91,12 @@
>    if (Op.isReg()) {
>      switch (Op.getReg()) {
>      // This is the default predicate state, so we don't need to print it.
> -    case AMDGPU::PRED_SEL_OFF: break;
> -    default: O << getRegisterName(Op.getReg()); break;
> +    case AMDGPU::PRED_SEL_OFF:
> +      break;
> +
> +    default:
> +      printRegOperand(Op.getReg(), O);
> +      break;
>      }
>    } else if (Op.isImm()) {
>      O << Op.getImm();
> Index: lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
> ===================================================================
> --- lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
> +++ lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
> @@ -32,6 +32,7 @@
>    virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
>  
>  private:
> +  void printRegOperand(unsigned RegNo, raw_ostream &O);
>    void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
>    void printInterpSlot(const MCInst *MI, unsigned OpNum, raw_ostream &O);
>    void printMemOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
> Index: test/CodeGen/R600/32-bit-local-address-space.ll
> ===================================================================
> --- test/CodeGen/R600/32-bit-local-address-space.ll
> +++ test/CodeGen/R600/32-bit-local-address-space.ll
> @@ -10,7 +10,7 @@
>  ; instructions with B64, U64, and I64 take 64-bit operands.
>  
>  ; CHECK-LABEL: @local_address_load
> -; CHECK: V_MOV_B32_e{{32|64}} [[PTR:VGPR[0-9]]]
> +; CHECK: V_MOV_B32_e{{32|64}} [[PTR:v[0-9]]]
>  ; CHECK: DS_READ_B32 [[PTR]]
>  define void @local_address_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
>  entry:
> @@ -20,7 +20,7 @@
>  }
>  
>  ; CHECK-LABEL: @local_address_gep
> -; CHECK: V_ADD_I32_e{{32|64}} [[PTR:VGPR[0-9]]]
> +; CHECK: V_ADD_I32_e{{32|64}} [[PTR:v[0-9]]]
>  ; CHECK: DS_READ_B32 [[PTR]]
>  define void @local_address_gep(i32 addrspace(1)* %out, i32 addrspace(3)* %in, i32 %offset) {
>  entry:
> @@ -31,7 +31,7 @@
>  }
>  
>  ; CHECK-LABEL: @local_address_gep_const_offset
> -; CHECK: V_ADD_I32_e{{32|64}} [[PTR:VGPR[0-9]]]
> +; CHECK: V_ADD_I32_e{{32|64}} [[PTR:v[0-9]]]
>  ; CHECK: DS_READ_B32 [[PTR]]
>  define void @local_address_gep_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
>  entry:
> Index: test/CodeGen/R600/64bit-kernel-args.ll
> ===================================================================
> --- test/CodeGen/R600/64bit-kernel-args.ll
> +++ test/CodeGen/R600/64bit-kernel-args.ll
> @@ -1,8 +1,8 @@
>  ; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s --check-prefix=SI-CHECK
>  
>  ; SI-CHECK: @f64_kernel_arg
> -; SI-CHECK-DAG: S_LOAD_DWORDX2 SGPR{{[0-9]}}_SGPR{{[0-9]}}, SGPR0_SGPR1, 9
> -; SI-CHECK-DAG: S_LOAD_DWORDX2 SGPR{{[0-9]}}_SGPR{{[0-9]}}, SGPR0_SGPR1, 11
> +; SI-CHECK-DAG: S_LOAD_DWORDX2 s[{{[0-9]:[0-9]}}], s[0:1], 9
> +; SI-CHECK-DAG: S_LOAD_DWORDX2 s[{{[0-9]:[0-9]}}], s[0:1], 11
>  ; SI-CHECK: BUFFER_STORE_DWORDX2
>  define void @f64_kernel_arg(double addrspace(1)* %out, double  %in) {
>  entry:
> Index: test/CodeGen/R600/add.ll
> ===================================================================
> --- test/CodeGen/R600/add.ll
> +++ test/CodeGen/R600/add.ll
> @@ -5,7 +5,7 @@
>  ;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK-LABEL: @test1:
> -;SI-CHECK: V_ADD_I32_e32 [[REG:VGPR[0-9]+]], {{VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_ADD_I32_e32 [[REG:v[0-9]+]], {{v[0-9]+, v[0-9]+}}
>  ;SI-CHECK-NOT: [[REG]]
>  ;SI-CHECK: BUFFER_STORE_DWORD [[REG]],
>  define void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
> @@ -22,8 +22,8 @@
>  ;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK-LABEL: @test2:
> -;SI-CHECK: V_ADD_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_ADD_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
> @@ -41,10 +41,10 @@
>  ;EG-CHECK: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK-LABEL: @test4:
> -;SI-CHECK: V_ADD_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_ADD_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_ADD_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_ADD_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_ADD_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
> Index: test/CodeGen/R600/address-space.ll
> ===================================================================
> --- test/CodeGen/R600/address-space.ll
> +++ test/CodeGen/R600/address-space.ll
> @@ -5,8 +5,8 @@
>  %struct.foo = type { [3 x float], [3 x float] }
>  
>  ; CHECK-LABEL: @do_as_ptr_calcs:
> -; CHECK: V_ADD_I32_e64 {{VGPR[0-9]+}},
> -; CHECK: V_ADD_I32_e64 [[REG1:VGPR[0-9]+]],
> +; CHECK: V_ADD_I32_e64 {{v[0-9]+}},
> +; CHECK: V_ADD_I32_e64 [[REG1:v[0-9]+]],
>  ; CHECK: DS_READ_B32 [[REG1]],
>  define void @do_as_ptr_calcs(%struct.foo addrspace(3)* nocapture %ptr) nounwind {
>  entry:
> Index: test/CodeGen/R600/and.ll
> ===================================================================
> --- test/CodeGen/R600/and.ll
> +++ test/CodeGen/R600/and.ll
> @@ -6,8 +6,8 @@
>  ;EG-CHECK: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @test2
> -;SI-CHECK: V_AND_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_AND_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
> @@ -25,10 +25,10 @@
>  ;EG-CHECK: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @test4
> -;SI-CHECK: V_AND_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_AND_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_AND_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_AND_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_AND_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
> Index: test/CodeGen/R600/bfi_int.ll
> ===================================================================
> --- test/CodeGen/R600/bfi_int.ll
> +++ test/CodeGen/R600/bfi_int.ll
> @@ -38,8 +38,8 @@
>  ; R600-CHECK: @bfi_sha256_ma
>  ; R600-CHECK: XOR_INT * [[DST:T[0-9]+\.[XYZW]]], KC0[2].Z, KC0[2].W
>  ; R600-CHECK: BFI_INT * {{T[0-9]+\.[XYZW]}}, {{[[DST]]|PV\.[XYZW]}}, KC0[3].X, KC0[2].W
> -; SI-CHECK: V_XOR_B32_e64 [[DST:VGPR[0-9]+]], {{[SV]GPR[0-9]+, VGPR[0-9]+}}
> -; SI-CHECK: V_BFI_B32 {{VGPR[0-9]+}}, [[DST]], {{[SV]GPR[0-9]+, [SV]GPR[0-9]+}}
> +; SI-CHECK: V_XOR_B32_e64 [[DST:v[0-9]+]], {{[sv][0-9]+, v[0-9]+}}
> +; SI-CHECK: V_BFI_B32 {{v[0-9]+}}, [[DST]], {{[sv][0-9]+, [sv][0-9]+}}
>  
>  define void @bfi_sha256_ma(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
>  entry:
> Index: test/CodeGen/R600/build_vector.ll
> ===================================================================
> --- test/CodeGen/R600/build_vector.ll
> +++ test/CodeGen/R600/build_vector.ll
> @@ -6,9 +6,9 @@
>  ; R600-CHECK: MOV
>  ; R600-CHECK-NOT: MOV
>  ; SI-CHECK: @build_vector2
> -; SI-CHECK-DAG: V_MOV_B32_e32 [[X:VGPR[0-9]]], 5
> -; SI-CHECK-DAG: V_MOV_B32_e32 [[Y:VGPR[0-9]]], 6
> -; SI-CHECK: BUFFER_STORE_DWORDX2 [[X]]_[[Y]]
> +; SI-CHECK-DAG: V_MOV_B32_e32 v[[X:[0-9]]], 5
> +; SI-CHECK-DAG: V_MOV_B32_e32 v[[Y:[0-9]]], 6
> +; SI-CHECK: BUFFER_STORE_DWORDX2 v{{\[}}[[X]]:[[Y]]{{\]}}
>  define void @build_vector2 (<2 x i32> addrspace(1)* %out) {
>  entry:
>    store <2 x i32> <i32 5, i32 6>, <2 x i32> addrspace(1)* %out
> @@ -22,11 +22,11 @@
>  ; R600-CHECK: MOV
>  ; R600-CHECK-NOT: MOV
>  ; SI-CHECK: @build_vector4
> -; SI-CHECK-DAG: V_MOV_B32_e32 [[X:VGPR[0-9]]], 5
> -; SI-CHECK-DAG: V_MOV_B32_e32 [[Y:VGPR[0-9]]], 6
> -; SI-CHECK-DAG: V_MOV_B32_e32 [[Z:VGPR[0-9]]], 7
> -; SI-CHECK-DAG: V_MOV_B32_e32 [[W:VGPR[0-9]]], 8
> -; SI-CHECK: BUFFER_STORE_DWORDX4 [[X]]_[[Y]]_[[Z]]_[[W]]
> +; SI-CHECK-DAG: V_MOV_B32_e32 v[[X:[0-9]]], 5
> +; SI-CHECK-DAG: V_MOV_B32_e32 v[[Y:[0-9]]], 6
> +; SI-CHECK-DAG: V_MOV_B32_e32 v[[Z:[0-9]]], 7
> +; SI-CHECK-DAG: V_MOV_B32_e32 v[[W:[0-9]]], 8
> +; SI-CHECK: BUFFER_STORE_DWORDX4 v{{\[}}[[X]]:[[W]]{{\]}}
>  define void @build_vector4 (<4 x i32> addrspace(1)* %out) {
>  entry:
>    store <4 x i32> <i32 5, i32 6, i32 7, i32 8>, <4 x i32> addrspace(1)* %out
> Index: test/CodeGen/R600/fabs.ll
> ===================================================================
> --- test/CodeGen/R600/fabs.ll
> +++ test/CodeGen/R600/fabs.ll
> @@ -9,7 +9,7 @@
>  ; R600-CHECK-NOT: AND
>  ; R600-CHECK: |PV.{{[XYZW]}}|
>  ; SI-CHECK: @fabs_free
> -; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, SGPR{{[0-9]}}, 0, 1, 0, 0, 0
> +; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 1, 0, 0, 0
>  
>  define void @fabs_free(float addrspace(1)* %out, i32 %in) {
>  entry:
> Index: test/CodeGen/R600/fadd64.ll
> ===================================================================
> --- test/CodeGen/R600/fadd64.ll
> +++ test/CodeGen/R600/fadd64.ll
> @@ -1,7 +1,7 @@
>  ; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
>  
>  ; CHECK: @fadd_f64
> -; CHECK: V_ADD_F64 {{VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_ADD_F64 {{v[[0-9]+:[0-9]+]}}, {{v[[0-9]+:[0-9]+]}}, {{v[[0-9]+:[0-9]+]}}
>  
>  define void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                        double addrspace(1)* %in2) {
> Index: test/CodeGen/R600/fcmp64.ll
> ===================================================================
> --- test/CodeGen/R600/fcmp64.ll
> +++ test/CodeGen/R600/fcmp64.ll
> @@ -1,7 +1,7 @@
>  ; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
>  
>  ; CHECK: @flt_f64
> -; CHECK: V_CMP_LT_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_CMP_LT_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
>  
>  define void @flt_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                       double addrspace(1)* %in2) {
> @@ -14,7 +14,7 @@
>  }
>  
>  ; CHECK: @fle_f64
> -; CHECK: V_CMP_LE_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_CMP_LE_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
>  
>  define void @fle_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                       double addrspace(1)* %in2) {
> @@ -27,7 +27,7 @@
>  }
>  
>  ; CHECK: @fgt_f64
> -; CHECK: V_CMP_GT_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_CMP_GT_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
>  
>  define void @fgt_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                       double addrspace(1)* %in2) {
> @@ -40,7 +40,7 @@
>  }
>  
>  ; CHECK: @fge_f64
> -; CHECK: V_CMP_GE_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_CMP_GE_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
>  
>  define void @fge_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                       double addrspace(1)* %in2) {
> @@ -53,7 +53,7 @@
>  }
>  
>  ; CHECK: @fne_f64
> -; CHECK: V_CMP_NEQ_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_CMP_NEQ_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
>  
>  define void @fne_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                       double addrspace(1)* %in2) {
> @@ -66,7 +66,7 @@
>  }
>  
>  ; CHECK: @feq_f64
> -; CHECK: V_CMP_EQ_F64_e64 {{SGPR[0-9]+_SGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_CMP_EQ_F64_e64 {{s[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+], v[[0-9]+:[0-9]+]}}
>  
>  define void @feq_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                       double addrspace(1)* %in2) {
> Index: test/CodeGen/R600/fconst64.ll
> ===================================================================
> --- test/CodeGen/R600/fconst64.ll
> +++ test/CodeGen/R600/fconst64.ll
> @@ -1,8 +1,8 @@
>  ; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
>  
>  ; CHECK: @fconst_f64
> -; CHECK: V_MOV_B32_e32 {{VGPR[0-9]+}}, 0.000000e+00
> -; CHECK-NEXT: V_MOV_B32_e32 {{VGPR[0-9]+}}, 2.312500e+00
> +; CHECK: V_MOV_B32_e32 {{v[0-9]+}}, 0.000000e+00
> +; CHECK-NEXT: V_MOV_B32_e32 {{v[0-9]+}}, 2.312500e+00
>  
>  define void @fconst_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
>     %r1 = load double addrspace(1)* %in
> Index: test/CodeGen/R600/fdiv64.ll
> ===================================================================
> --- test/CodeGen/R600/fdiv64.ll
> +++ test/CodeGen/R600/fdiv64.ll
> @@ -1,8 +1,8 @@
>  ; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
>  
>  ; CHECK: @fdiv_f64
> -; CHECK: V_RCP_F64_e32 {{VGPR[0-9]+_VGPR[0-9]+}}
> -; CHECK: V_MUL_F64 {{VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_RCP_F64_e32 {{v\[[0-9]+:[0-9]+\]}}
> +; CHECK: V_MUL_F64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}
>  
>  define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                        double addrspace(1)* %in2) {
> Index: test/CodeGen/R600/fma.ll
> ===================================================================
> --- test/CodeGen/R600/fma.ll
> +++ test/CodeGen/R600/fma.ll
> @@ -1,7 +1,7 @@
>  ; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
>  
>  ; CHECK: @fma_f32
> -; CHECK: V_FMA_F32 {{VGPR[0-9]+, VGPR[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +; CHECK: V_FMA_F32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
>                       float addrspace(1)* %in2, float addrspace(1)* %in3) {
> @@ -16,7 +16,7 @@
>  declare float @llvm.fma.f32(float, float, float)
>  
>  ; CHECK: @fma_f64
> -; CHECK: V_FMA_F64 {{VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_FMA_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
>  
>  define void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                       double addrspace(1)* %in2, double addrspace(1)* %in3) {
> Index: test/CodeGen/R600/fmul64.ll
> ===================================================================
> --- test/CodeGen/R600/fmul64.ll
> +++ test/CodeGen/R600/fmul64.ll
> @@ -1,7 +1,7 @@
>  ; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
>  
>  ; CHECK: @fmul_f64
> -; CHECK: V_MUL_F64 {{VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_MUL_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
>  
>  define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                        double addrspace(1)* %in2) {
> Index: test/CodeGen/R600/fmuladd.ll
> ===================================================================
> --- test/CodeGen/R600/fmuladd.ll
> +++ test/CodeGen/R600/fmuladd.ll
> @@ -1,7 +1,7 @@
>  ; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck %s
>  
>  ; CHECK: @fmuladd_f32
> -; CHECK: V_MAD_F32 {{VGPR[0-9]+, VGPR[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +; CHECK: V_MAD_F32 {{v[0-9]+, v[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @fmuladd_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
>                           float addrspace(1)* %in2, float addrspace(1)* %in3) {
> @@ -16,7 +16,7 @@
>  declare float @llvm.fmuladd.f32(float, float, float)
>  
>  ; CHECK: @fmuladd_f64
> -; CHECK: V_FMA_F64 {{VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_FMA_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
>  
>  define void @fmuladd_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                           double addrspace(1)* %in2, double addrspace(1)* %in3) {
> Index: test/CodeGen/R600/fneg.ll
> ===================================================================
> --- test/CodeGen/R600/fneg.ll
> +++ test/CodeGen/R600/fneg.ll
> @@ -16,10 +16,10 @@
>  ; R600-CHECK: -PV
>  ; R600-CHECK: -PV
>  ; SI-CHECK-LABEL: @fneg_v4
> -; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, SGPR{{[0-9]}}, 0, 0, 0, 0, 1
> -; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, SGPR{{[0-9]}}, 0, 0, 0, 0, 1
> -; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, SGPR{{[0-9]}}, 0, 0, 0, 0, 1
> -; SI-CHECK: V_ADD_F32_e64 VGPR{{[0-9]}}, SGPR{{[0-9]}}, 0, 0, 0, 0, 1
> +; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 0, 0, 0, 1
> +; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 0, 0, 0, 1
> +; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 0, 0, 0, 1
> +; SI-CHECK: V_ADD_F32_e64 v{{[0-9]}}, s{{[0-9]}}, 0, 0, 0, 0, 1
>  define void @fneg_v4(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) {
>  entry:
>    %0 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %in
> Index: test/CodeGen/R600/fsqrt.ll
> ===================================================================
> --- test/CodeGen/R600/fsqrt.ll
> +++ test/CodeGen/R600/fsqrt.ll
> @@ -1,7 +1,7 @@
>  ; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
>  
>  ; CHECK: @fsqrt_f32
> -; CHECK: V_SQRT_F32_e32 {{VGPR[0-9]+, VGPR[0-9]+}}
> +; CHECK: V_SQRT_F32_e32 {{v[0-9]+, v[0-9]+}}
>  
>  define void @fsqrt_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
>     %r0 = load float addrspace(1)* %in
> @@ -11,7 +11,7 @@
>  }
>  
>  ; CHECK: @fsqrt_f64
> -; CHECK: V_SQRT_F64_e32 {{VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}
> +; CHECK: V_SQRT_F64_e32 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}
>  
>  define void @fsqrt_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
>     %r0 = load double addrspace(1)* %in
> Index: test/CodeGen/R600/fsub64.ll
> ===================================================================
> --- test/CodeGen/R600/fsub64.ll
> +++ test/CodeGen/R600/fsub64.ll
> @@ -1,7 +1,7 @@
>  ; RUN: llc < %s -march=r600 -mcpu=tahiti -verify-machineinstrs | FileCheck %s
>  
>  ; CHECK: @fsub_f64
> -; CHECK: V_ADD_F64 {{VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+, VGPR[0-9]+_VGPR[0-9]+}}, 0, 0, 0, 0, 2
> +; CHECK: V_ADD_F64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\]}}, 0, 0, 0, 0, 2
>  
>  define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
>                        double addrspace(1)* %in2) {
> Index: test/CodeGen/R600/imm.ll
> ===================================================================
> --- test/CodeGen/R600/imm.ll
> +++ test/CodeGen/R600/imm.ll
> @@ -2,9 +2,9 @@
>  
>  ; Use a 64-bit value with lo bits that can be represented as an inline constant
>  ; CHECK: @i64_imm_inline_lo
> -; CHECK: S_MOV_B32 [[LO:SGPR[0-9]+]], 5
> -; CHECK: V_MOV_B32_e32 [[LO_VGPR:VGPR[0-9]+]], [[LO]]
> -; CHECK: BUFFER_STORE_DWORDX2 [[LO_VGPR]]_
> +; CHECK: S_MOV_B32 [[LO:s[0-9]+]], 5
> +; CHECK: V_MOV_B32_e32 v[[LO_VGPR:[0-9]+]], [[LO]]
> +; CHECK: BUFFER_STORE_DWORDX2 v{{\[}}[[LO_VGPR]]:
>  define void @i64_imm_inline_lo(i64 addrspace(1) *%out) {
>  entry:
>    store i64 1311768464867721221, i64 addrspace(1) *%out ; 0x1234567800000005
> @@ -13,9 +13,9 @@
>  
>  ; Use a 64-bit value with hi bits that can be represented as an inline constant
>  ; CHECK: @i64_imm_inline_hi
> -; CHECK: S_MOV_B32 [[HI:SGPR[0-9]+]], 5
> -; CHECK: V_MOV_B32_e32 [[HI_VGPR:VGPR[0-9]+]], [[HI]]
> -; CHECK: BUFFER_STORE_DWORDX2 {{VGPR[0-9]+}}_[[HI_VGPR]]
> +; CHECK: S_MOV_B32 [[HI:s[0-9]+]], 5
> +; CHECK: V_MOV_B32_e32 v[[HI_VGPR:[0-9]+]], [[HI]]
> +; CHECK: BUFFER_STORE_DWORDX2 v{{\[[0-9]+:}}[[HI_VGPR]]
>  define void @i64_imm_inline_hi(i64 addrspace(1) *%out) {
>  entry:
>    store i64 21780256376, i64 addrspace(1) *%out ; 0x0000000512345678
> Index: test/CodeGen/R600/indirect-addressing-si.ll
> ===================================================================
> --- test/CodeGen/R600/indirect-addressing-si.ll
> +++ test/CodeGen/R600/indirect-addressing-si.ll
> @@ -4,7 +4,7 @@
>  ; indexing of vectors.
>  
>  ; CHECK: extract_w_offset
> -; CHECK: S_MOV_B32 M0
> +; CHECK: S_MOV_B32 m0
>  ; CHECK-NEXT: V_MOVRELS_B32_e32
>  define void @extract_w_offset(float addrspace(1)* %out, i32 %in) {
>  entry:
> @@ -15,7 +15,7 @@
>  }
>  
>  ; CHECK: extract_wo_offset
> -; CHECK: S_MOV_B32 M0
> +; CHECK: S_MOV_B32 m0
>  ; CHECK-NEXT: V_MOVRELS_B32_e32
>  define void @extract_wo_offset(float addrspace(1)* %out, i32 %in) {
>  entry:
> @@ -25,7 +25,7 @@
>  }
>  
>  ; CHECK: insert_w_offset
> -; CHECK: S_MOV_B32 M0
> +; CHECK: S_MOV_B32 m0
>  ; CHECK-NEXT: V_MOVRELD_B32_e32
>  define void @insert_w_offset(float addrspace(1)* %out, i32 %in) {
>  entry:
> @@ -37,7 +37,7 @@
>  }
>  
>  ; CHECK: insert_wo_offset
> -; CHECK: S_MOV_B32 M0
> +; CHECK: S_MOV_B32 m0
>  ; CHECK-NEXT: V_MOVRELD_B32_e32
>  define void @insert_wo_offset(float addrspace(1)* %out, i32 %in) {
>  entry:
> Index: test/CodeGen/R600/kernel-args.ll
> ===================================================================
> --- test/CodeGen/R600/kernel-args.ll
> +++ test/CodeGen/R600/kernel-args.ll
> @@ -17,7 +17,7 @@
>  ; EG-CHECK-LABEL: @i8_zext_arg
>  ; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
>  ; SI-CHECK-LABEL: @i8_zext_arg
> -; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
> +; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
>  
>  define void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
>  entry:
> @@ -29,7 +29,7 @@
>  ; EG-CHECK-LABEL: @i8_sext_arg
>  ; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
>  ; SI-CHECK-LABEL: @i8_sext_arg
> -; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
> +; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
>  
>  define void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
>  entry:
> @@ -53,7 +53,7 @@
>  ; EG-CHECK-LABEL: @i16_zext_arg
>  ; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
>  ; SI-CHECK-LABEL: @i16_zext_arg
> -; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
> +; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
>  
>  define void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
>  entry:
> @@ -65,7 +65,7 @@
>  ; EG-CHECK-LABEL: @i16_sext_arg
>  ; EG-CHECK: MOV {{[ *]*}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z
>  ; SI-CHECK-LABEL: @i16_sext_arg
> -; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
> +; SI-CHECK: S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
>  
>  define void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
>  entry:
> @@ -77,7 +77,7 @@
>  ; EG-CHECK-LABEL: @i32_arg
>  ; EG-CHECK: T{{[0-9]\.[XYZW]}}, KC0[2].Z
>  ; SI-CHECK-LABEL: @i32_arg
> -; S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
> +; S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
>  define void @i32_arg(i32 addrspace(1)* nocapture %out, i32 %in) nounwind {
>  entry:
>    store i32 %in, i32 addrspace(1)* %out, align 4
> @@ -87,7 +87,7 @@
>  ; EG-CHECK-LABEL: @f32_arg
>  ; EG-CHECK: T{{[0-9]\.[XYZW]}}, KC0[2].Z
>  ; SI-CHECK-LABEL: @f32_arg
> -; S_LOAD_DWORD SGPR{{[0-9]}}, SGPR0_SGPR1, 11
> +; S_LOAD_DWORD s{{[0-9]}}, s[0:1], 11
>  define void @f32_arg(float addrspace(1)* nocapture %out, float %in) nounwind {
>  entry:
>    store float %in, float addrspace(1)* %out, align 4
> @@ -122,7 +122,7 @@
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
>  ; SI-CHECK-LABEL: @v2i32_arg
> -; SI-CHECK: S_LOAD_DWORDX2 SGPR{{[0-9]}}_SGPR{{[0-9]}}, SGPR0_SGPR1, 11
> +; SI-CHECK: S_LOAD_DWORDX2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 11
>  define void @v2i32_arg(<2 x i32> addrspace(1)* nocapture %out, <2 x i32> %in) nounwind {
>  entry:
>    store <2 x i32> %in, <2 x i32> addrspace(1)* %out, align 4
> @@ -133,7 +133,7 @@
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
>  ; SI-CHECK-LABEL: @v2f32_arg
> -; SI-CHECK: S_LOAD_DWORDX2 SGPR{{[0-9]}}_SGPR{{[0-9]}}, SGPR0_SGPR1, 11
> +; SI-CHECK: S_LOAD_DWORDX2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 11
>  define void @v2f32_arg(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) nounwind {
>  entry:
>    store <2 x float> %in, <2 x float> addrspace(1)* %out, align 4
> @@ -166,7 +166,7 @@
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
>  ; SI-CHECK-LABEL: @v3i32_arg
> -; SI-CHECK: S_LOAD_DWORDX4 SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}, SGPR0_SGPR1, 13
> +; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 13
>  define void @v3i32_arg(<3 x i32> addrspace(1)* nocapture %out, <3 x i32> %in) nounwind {
>  entry:
>    store <3 x i32> %in, <3 x i32> addrspace(1)* %out, align 4
> @@ -178,7 +178,7 @@
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
>  ; SI-CHECK-LABEL: @v3f32_arg
> -; SI-CHECK: S_LOAD_DWORDX4 SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}, SGPR0_SGPR1, 13
> +; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 13
>  define void @v3f32_arg(<3 x float> addrspace(1)* nocapture %out, <3 x float> %in) nounwind {
>  entry:
>    store <3 x float> %in, <3 x float> addrspace(1)* %out, align 4
> @@ -223,7 +223,7 @@
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X
>  ; SI-CHECK-LABEL: @v4i32_arg
> -; SI-CHECK: S_LOAD_DWORDX4 SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}, SGPR0_SGPR1, 13
> +; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 13
>  define void @v4i32_arg(<4 x i32> addrspace(1)* nocapture %out, <4 x i32> %in) nounwind {
>  entry:
>    store <4 x i32> %in, <4 x i32> addrspace(1)* %out, align 4
> @@ -236,7 +236,7 @@
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X
>  ; SI-CHECK-LABEL: @v4f32_arg
> -; SI-CHECK: S_LOAD_DWORDX4 SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}, SGPR0_SGPR1, 13
> +; SI-CHECK: S_LOAD_DWORDX4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 13
>  define void @v4f32_arg(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) nounwind {
>  entry:
>    store <4 x float> %in, <4 x float> addrspace(1)* %out, align 4
> @@ -300,7 +300,7 @@
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
>  ; SI-CHECK-LABEL: @v8i32_arg
> -; SI-CHECK: S_LOAD_DWORDX8 SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}, SGPR0_SGPR1, 17
> +; SI-CHECK: S_LOAD_DWORDX8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 17
>  define void @v8i32_arg(<8 x i32> addrspace(1)* nocapture %out, <8 x i32> %in) nounwind {
>  entry:
>    store <8 x i32> %in, <8 x i32> addrspace(1)* %out, align 4
> @@ -317,7 +317,7 @@
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
>  ; SI-CHECK-LABEL: @v8f32_arg
> -; SI-CHECK: S_LOAD_DWORDX8 SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}, SGPR0_SGPR1, 17
> +; SI-CHECK: S_LOAD_DWORDX8 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 17
>  define void @v8f32_arg(<8 x float> addrspace(1)* nocapture %out, <8 x float> %in) nounwind {
>  entry:
>    store <8 x float> %in, <8 x float> addrspace(1)* %out, align 4
> @@ -422,7 +422,7 @@
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
>  ; SI-CHECK-LABEL: @v16i32_arg
> -; SI-CHECK: S_LOAD_DWORDX16 SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}, SGPR0_SGPR1, 25
> +; SI-CHECK: S_LOAD_DWORDX16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 25
>  define void @v16i32_arg(<16 x i32> addrspace(1)* nocapture %out, <16 x i32> %in) nounwind {
>  entry:
>    store <16 x i32> %in, <16 x i32> addrspace(1)* %out, align 4
> @@ -447,7 +447,7 @@
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
>  ; EG-CHECK-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
>  ; SI-CHECK-LABEL: @v16f32_arg
> -; SI-CHECK: S_LOAD_DWORDX16 SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}_SGPR{{[0-9]+}}, SGPR0_SGPR1, 25
> +; SI-CHECK: S_LOAD_DWORDX16 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 25
>  define void @v16f32_arg(<16 x float> addrspace(1)* nocapture %out, <16 x float> %in) nounwind {
>  entry:
>    store <16 x float> %in, <16 x float> addrspace(1)* %out, align 4
> Index: test/CodeGen/R600/llvm.SI.imageload.ll
> ===================================================================
> --- test/CodeGen/R600/llvm.SI.imageload.ll
> +++ test/CodeGen/R600/llvm.SI.imageload.ll
> @@ -1,15 +1,15 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
>  
> -;CHECK-DAG: IMAGE_LOAD {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 15, 0, 0, -1
> -;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+_VGPR[0-9]+}}, 3, 0, 0, 0
> -;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+}}, 2, 0, 0, 0
> -;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+}}, 1, 0, 0, 0
> -;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+}}, 4, 0, 0, 0
> -;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+}}, 8, 0, 0, 0
> -;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+_VGPR[0-9]+}}, 5, 0, 0, 0
> -;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+_VGPR[0-9]+}}, 12, 0, 0, -1
> -;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 7, 0, 0, 0
> -;CHECK-DAG: IMAGE_LOAD_MIP {{VGPR[0-9]+}}, 8, 0, 0, -1
> +;CHECK-DAG: IMAGE_LOAD {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, -1
> +;CHECK-DAG: IMAGE_LOAD_MIP {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, 0
> +;CHECK-DAG: IMAGE_LOAD_MIP {{v[0-9]+}}, 2, 0, 0, 0
> +;CHECK-DAG: IMAGE_LOAD_MIP {{v[0-9]+}}, 1, 0, 0, 0
> +;CHECK-DAG: IMAGE_LOAD_MIP {{v[0-9]+}}, 4, 0, 0, 0
> +;CHECK-DAG: IMAGE_LOAD_MIP {{v[0-9]+}}, 8, 0, 0, 0
> +;CHECK-DAG: IMAGE_LOAD_MIP {{v\[[0-9]+:[0-9]+\]}}, 5, 0, 0, 0
> +;CHECK-DAG: IMAGE_LOAD_MIP {{v\[[0-9]+:[0-9]+\]}}, 12, 0, 0, -1
> +;CHECK-DAG: IMAGE_LOAD_MIP {{v\[[0-9]+:[0-9]+\]}}, 7, 0, 0, 0
> +;CHECK-DAG: IMAGE_LOAD_MIP {{v[0-9]+}}, 8, 0, 0, -1
>  
>  define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
>     %v1 = insertelement <4 x i32> undef, i32 %a1, i32 0
> @@ -84,7 +84,7 @@
>  
>  ; Test that coordinates are stored in vgprs and not sgprs
>  ; CHECK: vgpr_coords
> -; CHECK: IMAGE_LOAD_MIP VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}, 15, 0, 0, 0, 0, 0, 0, 0, VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}
> +; CHECK: IMAGE_LOAD_MIP {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}
>  define void @vgpr_coords(float addrspace(2)* addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
>  main_body:
>    %20 = getelementptr float addrspace(2)* addrspace(2)* %0, i32 0
> Index: test/CodeGen/R600/llvm.SI.resinfo.ll
> ===================================================================
> --- test/CodeGen/R600/llvm.SI.resinfo.ll
> +++ test/CodeGen/R600/llvm.SI.resinfo.ll
> @@ -1,21 +1,21 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
>  
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 15, 0, 0, -1
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+_VGPR[0-9]+}}, 3, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+}}, 2, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+}}, 1, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+}}, 4, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+}}, 8, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+_VGPR[0-9]+}}, 5, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+_VGPR[0-9]+}}, 9, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+_VGPR[0-9]+}}, 6, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+_VGPR[0-9]+}}, 10, 0, 0, -1
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+_VGPR[0-9]+}}, 12, 0, 0, -1
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 7, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 11, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 13, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 14, 0, 0, 0
> -;CHECK: IMAGE_GET_RESINFO {{VGPR[0-9]+}}, 8, 0, 0, -1
> +;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, -1
> +;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 3, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v[0-9]+}}, 2, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v[0-9]+}}, 1, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v[0-9]+}}, 4, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v[0-9]+}}, 8, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 5, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 9, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 6, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 10, 0, 0, -1
> +;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 12, 0, 0, -1
> +;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 7, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 11, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 13, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v\[[0-9]+:[0-9]+\]}}, 14, 0, 0, 0
> +;CHECK: IMAGE_GET_RESINFO {{v[0-9]+}}, 8, 0, 0, -1
>  
>  define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8,
>  		  i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15, i32 %a16) {
> Index: test/CodeGen/R600/llvm.SI.sample-masked.ll
> ===================================================================
> --- test/CodeGen/R600/llvm.SI.sample-masked.ll
> +++ test/CodeGen/R600/llvm.SI.sample-masked.ll
> @@ -1,7 +1,7 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
>  
>  ; CHECK-LABEL: @v1
> -; CHECK: IMAGE_SAMPLE VGPR{{[[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}, 13
> +; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 13
>  define void @v1(i32 %a1) {
>  entry:
>    %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
> @@ -14,7 +14,7 @@
>  }
>  
>  ; CHECK-LABEL: @v2
> -; CHECK: IMAGE_SAMPLE VGPR{{[[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}, 11
> +; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 11
>  define void @v2(i32 %a1) {
>  entry:
>    %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
> @@ -27,7 +27,7 @@
>  }
>  
>  ; CHECK-LABEL: @v3
> -; CHECK: IMAGE_SAMPLE VGPR{{[[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}, 14
> +; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 14
>  define void @v3(i32 %a1) {
>  entry:
>    %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
> @@ -40,7 +40,7 @@
>  }
>  
>  ; CHECK-LABEL: @v4
> -; CHECK: IMAGE_SAMPLE VGPR{{[[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}, 7
> +; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 7
>  define void @v4(i32 %a1) {
>  entry:
>    %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
> @@ -53,7 +53,7 @@
>  }
>  
>  ; CHECK-LABEL: @v5
> -; CHECK: IMAGE_SAMPLE VGPR{{[[0-9]}}_VGPR{{[0-9]}}, 10
> +; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 10
>  define void @v5(i32 %a1) {
>  entry:
>    %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
> @@ -65,7 +65,7 @@
>  }
>  
>  ; CHECK-LABEL: @v6
> -; CHECK: IMAGE_SAMPLE VGPR{{[[0-9]}}_VGPR{{[0-9]}}, 6
> +; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 6
>  define void @v6(i32 %a1) {
>  entry:
>    %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
> @@ -77,7 +77,7 @@
>  }
>  
>  ; CHECK-LABEL: @v7
> -; CHECK: IMAGE_SAMPLE VGPR{{[[0-9]}}_VGPR{{[0-9]}}, 9
> +; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 9
>  define void @v7(i32 %a1) {
>  entry:
>    %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
> Index: test/CodeGen/R600/llvm.SI.sample.ll
> ===================================================================
> --- test/CodeGen/R600/llvm.SI.sample.ll
> +++ test/CodeGen/R600/llvm.SI.sample.ll
> @@ -1,21 +1,21 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
>  
> -;CHECK-DAG: IMAGE_SAMPLE {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 15
> -;CHECK-DAG: IMAGE_SAMPLE {{VGPR[0-9]+_VGPR[0-9]+}}, 3
> -;CHECK-DAG: IMAGE_SAMPLE {{VGPR[0-9]+}}, 2
> -;CHECK-DAG: IMAGE_SAMPLE {{VGPR[0-9]+}}, 1
> -;CHECK-DAG: IMAGE_SAMPLE {{VGPR[0-9]+}}, 4
> -;CHECK-DAG: IMAGE_SAMPLE {{VGPR[0-9]+}}, 8
> -;CHECK-DAG: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+}}, 5
> -;CHECK-DAG: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+}}, 9
> -;CHECK-DAG: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+}}, 6
> -;CHECK-DAG: IMAGE_SAMPLE {{VGPR[0-9]+_VGPR[0-9]+}}, 10
> -;CHECK-DAG: IMAGE_SAMPLE {{VGPR[0-9]+_VGPR[0-9]+}}, 12
> -;CHECK-DAG: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 7
> -;CHECK-DAG: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 11
> -;CHECK-DAG: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 13
> -;CHECK-DAG: IMAGE_SAMPLE {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 14
> -;CHECK-DAG: IMAGE_SAMPLE {{VGPR[0-9]+}}, 8
> +;CHECK-DAG: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 15
> +;CHECK-DAG: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 3
> +;CHECK-DAG: IMAGE_SAMPLE {{v[0-9]+}}, 2
> +;CHECK-DAG: IMAGE_SAMPLE {{v[0-9]+}}, 1
> +;CHECK-DAG: IMAGE_SAMPLE {{v[0-9]+}}, 4
> +;CHECK-DAG: IMAGE_SAMPLE {{v[0-9]+}}, 8
> +;CHECK-DAG: IMAGE_SAMPLE_C {{v\[[0-9]+:[0-9]+\]}}, 5
> +;CHECK-DAG: IMAGE_SAMPLE_C {{v\[[0-9]+:[0-9]+\]}}, 9
> +;CHECK-DAG: IMAGE_SAMPLE_C {{v\[[0-9]+:[0-9]+\]}}, 6
> +;CHECK-DAG: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 10
> +;CHECK-DAG: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 12
> +;CHECK-DAG: IMAGE_SAMPLE_C {{v\[[0-9]+:[0-9]+\]}}, 7
> +;CHECK-DAG: IMAGE_SAMPLE_C {{v\[[0-9]+:[0-9]+\]}}, 11
> +;CHECK-DAG: IMAGE_SAMPLE_C {{v\[[0-9]+:[0-9]+\]}}, 13
> +;CHECK-DAG: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 14
> +;CHECK-DAG: IMAGE_SAMPLE {{v[0-9]+}}, 8
>  
>  define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
>     %v1 = insertelement <4 x i32> undef, i32 %a1, i32 0
> @@ -136,7 +136,7 @@
>  }
>  
>  ; CHECK: @v1
> -; CHECK: IMAGE_SAMPLE VGPR{{[[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}, 15
> +; CHECK: IMAGE_SAMPLE {{v\[[0-9]+:[0-9]+\]}}, 15
>  define void @v1(i32 %a1) {
>  entry:
>    %0 = insertelement <1 x i32> undef, i32 %a1, i32 0
> Index: test/CodeGen/R600/llvm.SI.sampled.ll
> ===================================================================
> --- test/CodeGen/R600/llvm.SI.sampled.ll
> +++ test/CodeGen/R600/llvm.SI.sampled.ll
> @@ -1,21 +1,21 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
>  
> -;CHECK-DAG: IMAGE_SAMPLE_D {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 15
> -;CHECK-DAG: IMAGE_SAMPLE_D {{VGPR[0-9]+_VGPR[0-9]+}}, 3
> -;CHECK-DAG: IMAGE_SAMPLE_D {{VGPR[0-9]+}}, 2
> -;CHECK-DAG: IMAGE_SAMPLE_D {{VGPR[0-9]+}}, 1
> -;CHECK-DAG: IMAGE_SAMPLE_D {{VGPR[0-9]+}}, 4
> -;CHECK-DAG: IMAGE_SAMPLE_D {{VGPR[0-9]+}}, 8
> -;CHECK-DAG: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+}}, 5
> -;CHECK-DAG: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+}}, 9
> -;CHECK-DAG: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+}}, 6
> -;CHECK-DAG: IMAGE_SAMPLE_D {{VGPR[0-9]+_VGPR[0-9]+}}, 10
> -;CHECK-DAG: IMAGE_SAMPLE_D {{VGPR[0-9]+_VGPR[0-9]+}}, 12
> -;CHECK-DAG: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 7
> -;CHECK-DAG: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 11
> -;CHECK-DAG: IMAGE_SAMPLE_C_D {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 13
> -;CHECK-DAG: IMAGE_SAMPLE_D {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 14
> -;CHECK-DAG: IMAGE_SAMPLE_D {{VGPR[0-9]+}}, 8
> +;CHECK-DAG: IMAGE_SAMPLE_D {{v\[[0-9]+:[0-9]+\]}}, 15
> +;CHECK-DAG: IMAGE_SAMPLE_D {{v\[[0-9]+:[0-9]+\]}}, 3
> +;CHECK-DAG: IMAGE_SAMPLE_D {{v[0-9]+}}, 2
> +;CHECK-DAG: IMAGE_SAMPLE_D {{v[0-9]+}}, 1
> +;CHECK-DAG: IMAGE_SAMPLE_D {{v[0-9]+}}, 4
> +;CHECK-DAG: IMAGE_SAMPLE_D {{v[0-9]+}}, 8
> +;CHECK-DAG: IMAGE_SAMPLE_C_D {{v\[[0-9]+:[0-9]+\]}}, 5
> +;CHECK-DAG: IMAGE_SAMPLE_C_D {{v\[[0-9]+:[0-9]+\]}}, 9
> +;CHECK-DAG: IMAGE_SAMPLE_C_D {{v\[[0-9]+:[0-9]+\]}}, 6
> +;CHECK-DAG: IMAGE_SAMPLE_D {{v\[[0-9]+:[0-9]+\]}}, 10
> +;CHECK-DAG: IMAGE_SAMPLE_D {{v\[[0-9]+:[0-9]+\]}}, 12
> +;CHECK-DAG: IMAGE_SAMPLE_C_D {{v\[[0-9]+:[0-9]+\]}}, 7
> +;CHECK-DAG: IMAGE_SAMPLE_C_D {{v\[[0-9]+:[0-9]+\]}}, 11
> +;CHECK-DAG: IMAGE_SAMPLE_C_D {{v\[[0-9]+:[0-9]+\]}}, 13
> +;CHECK-DAG: IMAGE_SAMPLE_D {{v\[[0-9]+:[0-9]+\]}}, 14
> +;CHECK-DAG: IMAGE_SAMPLE_D {{v[0-9]+}}, 8
>  
>  define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
>     %v1 = insertelement <4 x i32> undef, i32 %a1, i32 0
> Index: test/CodeGen/R600/llvm.SI.tbuffer.store.ll
> ===================================================================
> --- test/CodeGen/R600/llvm.SI.tbuffer.store.ll
> +++ test/CodeGen/R600/llvm.SI.tbuffer.store.ll
> @@ -1,7 +1,7 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
>  
>  ;CHECK_LABEL: @test1
> -;CHECK: TBUFFER_STORE_FORMAT_XYZW {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 32, -1, 0, -1, 0, 14, 4, {{VGPR[0-9]+}}, {{SGPR[0-9]+_SGPR[0-9]+_SGPR[0-9]+_SGPR[0-9]+}}, -1, 0, 0
> +;CHECK: TBUFFER_STORE_FORMAT_XYZW {{v\[[0-9]+:[0-9]+\]}}, 32, -1, 0, -1, 0, 14, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
>  define void @test1(i32 %a1, i32 %vaddr) {
>      %vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
>      call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
> @@ -11,7 +11,7 @@
>  }
>  
>  ;CHECK_LABEL: @test2
> -;CHECK: TBUFFER_STORE_FORMAT_XYZ {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 24, -1, 0, -1, 0, 13, 4, {{VGPR[0-9]+}}, {{SGPR[0-9]+_SGPR[0-9]+_SGPR[0-9]+_SGPR[0-9]+}}, -1, 0, 0
> +;CHECK: TBUFFER_STORE_FORMAT_XYZ {{v\[[0-9]+:[0-9]+\]}}, 24, -1, 0, -1, 0, 13, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
>  define void @test2(i32 %a1, i32 %vaddr) {
>      %vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
>      call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
> @@ -21,7 +21,7 @@
>  }
>  
>  ;CHECK_LABEL: @test3
> -;CHECK: TBUFFER_STORE_FORMAT_XY {{VGPR[0-9]+_VGPR[0-9]+}}, 16, -1, 0, -1, 0, 11, 4, {{VGPR[0-9]+}}, {{SGPR[0-9]+_SGPR[0-9]+_SGPR[0-9]+_SGPR[0-9]+}}, -1, 0, 0
> +;CHECK: TBUFFER_STORE_FORMAT_XY {{v\[[0-9]+:[0-9]+\]}}, 16, -1, 0, -1, 0, 11, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
>  define void @test3(i32 %a1, i32 %vaddr) {
>      %vdata = insertelement <2 x i32> undef, i32 %a1, i32 0
>      call void @llvm.SI.tbuffer.store.v2i32(<16 x i8> undef, <2 x i32> %vdata,
> @@ -31,7 +31,7 @@
>  }
>  
>  ;CHECK_LABEL: @test4
> -;CHECK: TBUFFER_STORE_FORMAT_X {{VGPR[0-9]+}}, 8, -1, 0, -1, 0, 4, 4, {{VGPR[0-9]+}}, {{SGPR[0-9]+_SGPR[0-9]+_SGPR[0-9]+_SGPR[0-9]+}}, -1, 0, 0
> +;CHECK: TBUFFER_STORE_FORMAT_X {{v[0-9]+}}, 8, -1, 0, -1, 0, 4, 4, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, -1, 0, 0
>  define void @test4(i32 %vdata, i32 %vaddr) {
>      call void @llvm.SI.tbuffer.store.i32(<16 x i8> undef, i32 %vdata,
>          i32 1, i32 %vaddr, i32 0, i32 8, i32 4, i32 4, i32 1, i32 0, i32 1,
> Index: test/CodeGen/R600/load.ll
> ===================================================================
> --- test/CodeGen/R600/load.ll
> +++ test/CodeGen/R600/load.ll
> @@ -11,7 +11,7 @@
>  ; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
>  
>  ; SI-CHECK: @load_i8
> -; SI-CHECK: BUFFER_LOAD_UBYTE VGPR{{[0-9]+}},
> +; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
>  define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
>    %1 = load i8 addrspace(1)* %in
>    %2 = zext i8 %1 to i32
> @@ -245,7 +245,7 @@
>  ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
>  
>  ; SI-CHECK: @load_i32
> -; SI-CHECK: BUFFER_LOAD_DWORD VGPR{{[0-9]+}}
> +; SI-CHECK: BUFFER_LOAD_DWORD v{{[0-9]+}}
>  define void @load_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
>  entry:
>    %0 = load i32 addrspace(1)* %in
> @@ -258,7 +258,7 @@
>  ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
>  
>  ; SI-CHECK: @load_f32
> -; SI-CHECK: BUFFER_LOAD_DWORD VGPR{{[0-9]+}}
> +; SI-CHECK: BUFFER_LOAD_DWORD v{{[0-9]+}}
>  define void @load_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
>  entry:
>    %0 = load float addrspace(1)* %in
> @@ -298,9 +298,9 @@
>  ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}},  literal.x
>  ; R600-CHECK: 31
>  ; SI-CHECK: @load_i64_sext
> -; SI-CHECK: BUFFER_LOAD_DWORDX2 [[VAL:VGPR[0-9]_VGPR[0-9]]]
> -; SI-CHECK: V_LSHL_B64 [[LSHL:VGPR[0-9]_VGPR[0-9]]], [[VAL]], 32
> -; SI-CHECK: V_ASHR_I64 VGPR{{[0-9]}}_VGPR{{[0-9]}}, [[LSHL]], 32
> +; SI-CHECK: BUFFER_LOAD_DWORDX2 [[VAL:v\[[0-9]:[0-9]\]]]
> +; SI-CHECK: V_LSHL_B64 [[LSHL:v\[[0-9]:[0-9]\]]], [[VAL]], 32
> +; SI-CHECK: V_ASHR_I64 v{{\[[0-9]:[0-9]\]}}, [[LSHL]], 32
>  
>  define void @load_i64_sext(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
>  entry:
> @@ -333,7 +333,7 @@
>  ; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
>  ; R600-CHECK: 24
>  ; SI-CHECK: @load_const_i8_sext
> -; SI-CHECK: BUFFER_LOAD_SBYTE VGPR{{[0-9]+}},
> +; SI-CHECK: BUFFER_LOAD_SBYTE v{{[0-9]+}},
>  define void @load_const_i8_sext(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
>  entry:
>    %0 = load i8 addrspace(2)* %in
> @@ -346,7 +346,7 @@
>  ; R600-CHECK: @load_const_i8_aligned
>  ; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
>  ; SI-CHECK: @load_const_i8_aligned
> -; SI-CHECK: BUFFER_LOAD_UBYTE VGPR{{[0-9]+}},
> +; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
>  define void @load_const_i8_aligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
>  entry:
>    %0 = load i8 addrspace(2)* %in
> @@ -359,7 +359,7 @@
>  ; R600-CHECK: @load_const_i8_unaligned
>  ; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
>  ; SI-CHECK: @load_const_i8_unaligned
> -; SI-CHECK: BUFFER_LOAD_UBYTE VGPR{{[0-9]+}},
> +; SI-CHECK: BUFFER_LOAD_UBYTE v{{[0-9]+}},
>  define void @load_const_i8_unaligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
>  entry:
>    %0 = getelementptr i8 addrspace(2)* %in, i32 1
> @@ -418,7 +418,7 @@
>  ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
>  
>  ; SI-CHECK: @load_const_addrspace_i32
> -; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]+}}
> +; SI-CHECK: S_LOAD_DWORD s{{[0-9]+}}
>  define void @load_const_addrspace_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
>  entry:
>    %0 = load i32 addrspace(2)* %in
> @@ -431,7 +431,7 @@
>  ; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
>  
>  ; SI-CHECK: @load_const_addrspace_f32
> -; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]+}}
> +; SI-CHECK: S_LOAD_DWORD s{{[0-9]+}}
>  define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
>    %1 = load float addrspace(2)* %in
>    store float %1, float addrspace(1)* %out
> Index: test/CodeGen/R600/load.vec.ll
> ===================================================================
> --- test/CodeGen/R600/load.vec.ll
> +++ test/CodeGen/R600/load.vec.ll
> @@ -5,7 +5,7 @@
>  ; EG-CHECK: @load_v2i32
>  ; EG-CHECK: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0
>  ; SI-CHECK: @load_v2i32
> -; SI-CHECK: BUFFER_LOAD_DWORDX2 VGPR{{[0-9]+}}
> +; SI-CHECK: BUFFER_LOAD_DWORDX2 v[{{[0-9]+:[0-9]+}}]
>  define void @load_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
>    %a = load <2 x i32> addrspace(1) * %in
>    store <2 x i32> %a, <2 x i32> addrspace(1)* %out
> @@ -16,7 +16,7 @@
>  ; EG-CHECK: @load_v4i32
>  ; EG-CHECK: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0
>  ; SI-CHECK: @load_v4i32
> -; SI-CHECK: BUFFER_LOAD_DWORDX4 VGPR{{[0-9]+}}
> +; SI-CHECK: BUFFER_LOAD_DWORDX4 v[{{[0-9]+:[0-9]+}}]
>  define void @load_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
>    %a = load <4 x i32> addrspace(1) * %in
>    store <4 x i32> %a, <4 x i32> addrspace(1)* %out
> Index: test/CodeGen/R600/load64.ll
> ===================================================================
> --- test/CodeGen/R600/load64.ll
> +++ test/CodeGen/R600/load64.ll
> @@ -2,7 +2,7 @@
>  
>  ; load a f64 value from the global address space.
>  ; CHECK: @load_f64
> -; CHECK: BUFFER_LOAD_DWORDX2 VGPR{{[0-9]+}}
> +; CHECK: BUFFER_LOAD_DWORDX2 v[{{[0-9]+:[0-9]+}}]
>  define void @load_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
>  entry:
>    %0 = load double addrspace(1)* %in
> @@ -12,7 +12,7 @@
>  
>  ; Load a f64 value from the constant address space.
>  ; CHECK: @load_const_addrspace_f64
> -; CHECK: S_LOAD_DWORDX2 SGPR{{[0-9]+}}
> +; CHECK: S_LOAD_DWORDX2 s[{{[0-9]+:[0-9]+}}]
>  define void @load_const_addrspace_f64(double addrspace(1)* %out, double addrspace(2)* %in) {
>    %1 = load double addrspace(2)* %in
>    store double %1, double addrspace(1)* %out
> Index: test/CodeGen/R600/local-memory-two-objects.ll
> ===================================================================
> --- test/CodeGen/R600/local-memory-two-objects.ll
> +++ test/CodeGen/R600/local-memory-two-objects.ll
> @@ -15,8 +15,8 @@
>  ; Make sure the lds writes are using different addresses.
>  ; EG-CHECK: LDS_WRITE {{[*]*}} {{PV|T}}[[ADDRW:[0-9]*\.[XYZW]]]
>  ; EG-CHECK-NOT: LDS_WRITE {{[*]*}} T[[ADDRW]]
> -; SI-CHECK: DS_WRITE_B32 0, {{VGPR[0-9]*}}, VGPR[[ADDRW:[0-9]*]]
> -; SI-CHECK-NOT: DS_WRITE_B32 0, {{VGPR[0-9]*}}, VGPR[[ADDRW]]
> +; SI-CHECK: DS_WRITE_B32 0, {{v[0-9]*}}, v[[ADDRW:[0-9]*]]
> +; SI-CHECK-NOT: DS_WRITE_B32 0, {{v[0-9]*}}, v[[ADDRW]]
>  
>  ; GROUP_BARRIER must be the last instruction in a clause
>  ; EG-CHECK: GROUP_BARRIER
> @@ -25,8 +25,8 @@
>  ; Make sure the lds reads are using different addresses.
>  ; EG-CHECK: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
>  ; EG-CHECK-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]
> -; SI-CHECK: DS_READ_B32 {{VGPR[0-9]+}}, 0, [[ADDRR:VGPR[0-9]+]]
> -; SI-CHECK-NOT: DS_READ_B32 {{VGPR[0-9]+}}, 0, [[ADDRR]]
> +; SI-CHECK: DS_READ_B32 {{v[0-9]+}}, 0, [[ADDRR:v[0-9]+]]
> +; SI-CHECK-NOT: DS_READ_B32 {{v[0-9]+}}, 0, [[ADDRR]]
>  
>  define void @local_memory_two_objects(i32 addrspace(1)* %out) {
>  entry:
> Index: test/CodeGen/R600/local-memory.ll
> ===================================================================
> --- test/CodeGen/R600/local-memory.ll
> +++ test/CodeGen/R600/local-memory.ll
> @@ -26,7 +26,7 @@
>  ; SI-CHECK: S_BARRIER
>  
>  ; EG-CHECK: LDS_READ_RET
> -; SI-CHECK: DS_READ_B32 {{VGPR[0-9]+}}, 0
> +; SI-CHECK: DS_READ_B32 {{v[0-9]+}}, 0
>  
>  define void @local_memory(i32 addrspace(1)* %out) {
>  entry:
> Index: test/CodeGen/R600/lshl.ll
> ===================================================================
> --- test/CodeGen/R600/lshl.ll
> +++ test/CodeGen/R600/lshl.ll
> @@ -1,6 +1,6 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
>  
> -;CHECK: V_LSHL_B32_e64 VGPR{{[0-9]}}, SGPR{{[0-9]}}, 1
> +;CHECK: V_LSHL_B32_e64 v{{[0-9]}}, s{{[0-9]}}, 1
>  
>  define void @test(i32 %p) {
>     %i = mul i32 %p, 2
> Index: test/CodeGen/R600/lshr.ll
> ===================================================================
> --- test/CodeGen/R600/lshr.ll
> +++ test/CodeGen/R600/lshr.ll
> @@ -1,6 +1,6 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
>  
> -;CHECK: V_LSHR_B32_e64 {{VGPR[0-9]}}, SGPR{{[0-9]}}, 1
> +;CHECK: V_LSHR_B32_e64 {{v[0-9]}}, s{{[0-9]}}, 1
>  
>  define void @test(i32 %p) {
>     %i = udiv i32 %p, 2
> Index: test/CodeGen/R600/mad_uint24.ll
> ===================================================================
> --- test/CodeGen/R600/mad_uint24.ll
> +++ test/CodeGen/R600/mad_uint24.ll
> @@ -31,9 +31,9 @@
>  ; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
>  ; EG-CHECK: 16
>  ; SI-CHECK: @i16_mad24
> -; SI-CHECK: V_MAD_U32_U24 [[MAD:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
> -; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 16, [[MAD]]
> -; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 16, [[LSHL]]
> +; SI-CHECK: V_MAD_U32_U24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
> +; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 16, [[MAD]]
> +; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 16, [[LSHL]]
>  
>  define void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
>  entry:
> @@ -56,9 +56,9 @@
>  ; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
>  ; EG-CHECK: 24
>  ; SI-CHECK: @i8_mad24
> -; SI-CHECK: V_MAD_U32_U24 [[MUL:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
> -; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 24, [[MUL]]
> -; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 24, [[LSHL]]
> +; SI-CHECK: V_MAD_U32_U24 [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
> +; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 24, [[MUL]]
> +; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 24, [[LSHL]]
>  
>  define void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
>  entry:
> Index: test/CodeGen/R600/mul.ll
> ===================================================================
> --- test/CodeGen/R600/mul.ll
> +++ test/CodeGen/R600/mul.ll
> @@ -8,8 +8,8 @@
>  ;EG-CHECK: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @test2
> -;SI-CHECK: V_MUL_LO_I32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_MUL_LO_I32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
> @@ -27,10 +27,10 @@
>  ;EG-CHECK: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @test4
> -;SI-CHECK: V_MUL_LO_I32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_MUL_LO_I32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_MUL_LO_I32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_MUL_LO_I32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
> Index: test/CodeGen/R600/mul_uint24.ll
> ===================================================================
> --- test/CodeGen/R600/mul_uint24.ll
> +++ test/CodeGen/R600/mul_uint24.ll
> @@ -29,9 +29,9 @@
>  ; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
>  ; EG-CHECK: 16
>  ; SI-CHECK: @i16_mul24
> -; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
> -; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 16, [[MUL]]
> -; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 16, [[LSHL]]
> +; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
> +; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 16, [[MUL]]
> +; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 16, [[LSHL]]
>  
>  define void @i16_mul24(i32 addrspace(1)* %out, i16 %a, i16 %b) {
>  entry:
> @@ -52,9 +52,9 @@
>  ; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
>  ; EG-CHECK: 24
>  ; SI-CHECK: @i8_mul24
> -; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
> -; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 24, [[MUL]]
> -; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 24, [[LSHL]]
> +; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
> +; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:v[0-9]]], 24, [[MUL]]
> +; SI-CHECK: V_ASHRREV_I32_e32 v{{[0-9]}}, 24, [[LSHL]]
>  
>  define void @i8_mul24(i32 addrspace(1)* %out, i8 %a, i8 %b) {
>  entry:
> Index: test/CodeGen/R600/mulhu.ll
> ===================================================================
> --- test/CodeGen/R600/mulhu.ll
> +++ test/CodeGen/R600/mulhu.ll
> @@ -1,8 +1,8 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
>  
> -;CHECK: V_MOV_B32_e32 VGPR{{[0-9]+}}, -1431655765
> -;CHECK: V_MUL_HI_U32 VGPR0, {{[SV]GPR[0-9]+}}, {{VGPR[0-9]+}}
> -;CHECK-NEXT: V_LSHRREV_B32_e32 VGPR0, 1, VGPR0
> +;CHECK: V_MOV_B32_e32 v{{[0-9]+}}, -1431655765
> +;CHECK: V_MUL_HI_U32 v0, {{[sv][0-9]+}}, {{v[0-9]+}}
> +;CHECK-NEXT: V_LSHRREV_B32_e32 v0, 1, v0
>  
>  define void @test(i32 %p) {
>     %i = udiv i32 %p, 3
> Index: test/CodeGen/R600/or.ll
> ===================================================================
> --- test/CodeGen/R600/or.ll
> +++ test/CodeGen/R600/or.ll
> @@ -6,8 +6,8 @@
>  ; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK-LABEL: @or_v2i32
> -;SI-CHECK: V_OR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_OR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
> @@ -25,10 +25,10 @@
>  ; EG-CHECK: OR_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK-LABEL: @or_v4i32
> -;SI-CHECK: V_OR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_OR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_OR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_OR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_OR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
> @@ -43,8 +43,8 @@
>  ; EG-CHECK-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
>  ; EG-CHECK-DAG: OR_INT * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
>  ; SI-CHECK-LABEL: @or_i64
> -; SI-CHECK: V_OR_B32_e32 VGPR{{[0-9]}}
> -; SI-CHECK: V_OR_B32_e32 VGPR{{[0-9]}}
> +; SI-CHECK: V_OR_B32_e32 v{{[0-9]}}
> +; SI-CHECK: V_OR_B32_e32 v{{[0-9]}}
>  define void @or_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
>  entry:
>  	%0 = or i64 %a, %b
> Index: test/CodeGen/R600/rotr.ll
> ===================================================================
> --- test/CodeGen/R600/rotr.ll
> +++ test/CodeGen/R600/rotr.ll
> @@ -23,8 +23,8 @@
>  
>  
>  ; SI-CHECK: @rotl
> -; SI-CHECK: V_SUB_I32_e64 [[DST:VGPR[0-9]+]], 32, {{[SV]GPR[0-9]+}}
> -; SI-CHECK: V_ALIGNBIT_B32 {{VGPR[0-9]+, [SV]GPR[0-9]+, VGPR[0-9]+}}, [[DST]]
> +; SI-CHECK: V_SUB_I32_e64 [[DST:v[0-9]+]], 32, {{[sv][0-9]+}}
> +; SI-CHECK: V_ALIGNBIT_B32 {{v[0-9]+, [sv][0-9]+, v[0-9]+}}, [[DST]]
>  define void @rotl(i32 addrspace(1)* %in, i32 %x, i32 %y) {
>  entry:
>    %0 = shl i32 %x, %y
> Index: test/CodeGen/R600/seto.ll
> ===================================================================
> --- test/CodeGen/R600/seto.ll
> +++ test/CodeGen/R600/seto.ll
> @@ -1,6 +1,6 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
>  
> -;CHECK: V_CMP_O_F32_e64 SGPR0_SGPR1, {{[SV]GPR[0-9]+, [SV]GPR[0-9]+}}, 0, 0, 0, 0
> +;CHECK: V_CMP_O_F32_e64 s[0:1], {{[sv][0-9]+, [sv][0-9]+}}, 0, 0, 0, 0
>  
>  define void @main(float %p) {
>  main_body:
> Index: test/CodeGen/R600/setuo.ll
> ===================================================================
> --- test/CodeGen/R600/setuo.ll
> +++ test/CodeGen/R600/setuo.ll
> @@ -1,6 +1,6 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
>  
> -;CHECK: V_CMP_U_F32_e64 SGPR0_SGPR1, {{[SV]GPR[0-9]+, [SV]GPR[0-9]+}}, 0, 0, 0, 0
> +;CHECK: V_CMP_U_F32_e64 s[0:1], {{[sv][0-9]+, [sv][0-9]+}}, 0, 0, 0, 0
>  
>  define void @main(float %p) {
>  main_body:
> Index: test/CodeGen/R600/sgpr-copy.ll
> ===================================================================
> --- test/CodeGen/R600/sgpr-copy.ll
> +++ test/CodeGen/R600/sgpr-copy.ll
> @@ -3,8 +3,8 @@
>  ; This test checks that no VGPR to SGPR copies are created by the register
>  ; allocator.
>  ; CHECK-LABEL: @phi1
> -; CHECK: S_BUFFER_LOAD_DWORD [[DST:SGPR[0-9]]], {{[SGPR_[0-9]+}}, 0
> -; CHECK: V_MOV_B32_e32 VGPR{{[0-9]}}, [[DST]]
> +; CHECK: S_BUFFER_LOAD_DWORD [[DST:s[0-9]]], {{s\[[0-9]+:[0-9]+\]}}, 0
> +; CHECK: V_MOV_B32_e32 v{{[0-9]}}, [[DST]]
>  
>  define void @phi1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
>  main_body:
> Index: test/CodeGen/R600/shl.ll
> ===================================================================
> --- test/CodeGen/R600/shl.ll
> +++ test/CodeGen/R600/shl.ll
> @@ -6,8 +6,8 @@
>  ;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @shl_v2i32
> -;SI-CHECK: V_LSHL_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_LSHL_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_LSHL_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_LSHL_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
> @@ -25,10 +25,10 @@
>  ;EG-CHECK: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @shl_v4i32
> -;SI-CHECK: V_LSHL_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_LSHL_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_LSHL_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_LSHL_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_LSHL_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_LSHL_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_LSHL_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_LSHL_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
> Index: test/CodeGen/R600/si-lod-bias.ll
> ===================================================================
> --- test/CodeGen/R600/si-lod-bias.ll
> +++ test/CodeGen/R600/si-lod-bias.ll
> @@ -1,10 +1,10 @@
>  ;RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s
>  
> -; This shader has the potential to generated illeagal VGPR to SGPR copies if
> +; This shader has the potential to generate illegal VGPR to SGPR copies if
>  ; the wrong register class is used for the REG_SEQUENCE instructions.
>  
>  ; CHECK: @main
> -; CHECK: IMAGE_SAMPLE_B VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}, 15, 0, 0, 0, 0, 0, 0, 0, VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}_VGPR{{[0-9]}}
> +; CHECK: IMAGE_SAMPLE_B v{{\[[0-9]:[0-9]\]}}, 15, 0, 0, 0, 0, 0, 0, 0, v{{\[[0-9]:[0-9]\]}}
>  
>  define void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
>  main_body:
> Index: test/CodeGen/R600/sra.ll
> ===================================================================
> --- test/CodeGen/R600/sra.ll
> +++ test/CodeGen/R600/sra.ll
> @@ -6,8 +6,8 @@
>  ;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @ashr_v2i32
> -;SI-CHECK: V_ASHR_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_ASHR_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_ASHR_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_ASHR_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @ashr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
> @@ -25,10 +25,10 @@
>  ;EG-CHECK: ASHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @ashr_v4i32
> -;SI-CHECK: V_ASHR_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_ASHR_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_ASHR_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_ASHR_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_ASHR_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_ASHR_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_ASHR_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_ASHR_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @ashr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
> Index: test/CodeGen/R600/srl.ll
> ===================================================================
> --- test/CodeGen/R600/srl.ll
> +++ test/CodeGen/R600/srl.ll
> @@ -6,8 +6,8 @@
>  ;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @lshr_v2i32
> -;SI-CHECK: V_LSHR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_LSHR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_LSHR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_LSHR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @lshr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
> @@ -26,10 +26,10 @@
>  ;EG-CHECK: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @lshr_v4i32
> -;SI-CHECK: V_LSHR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_LSHR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_LSHR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_LSHR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_LSHR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_LSHR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_LSHR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_LSHR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
> Index: test/CodeGen/R600/sub.ll
> ===================================================================
> --- test/CodeGen/R600/sub.ll
> +++ test/CodeGen/R600/sub.ll
> @@ -6,8 +6,8 @@
>  ;EG-CHECK: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @test2
> -;SI-CHECK: V_SUB_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_SUB_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
> @@ -25,10 +25,10 @@
>  ;EG-CHECK: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @test4
> -;SI-CHECK: V_SUB_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_SUB_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_SUB_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_SUB_I32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_SUB_I32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
>    %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
> Index: test/CodeGen/R600/trunc.ll
> ===================================================================
> --- test/CodeGen/R600/trunc.ll
> +++ test/CodeGen/R600/trunc.ll
> @@ -1,12 +1,11 @@
>  ; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
>  ; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s
>  
> -
>  define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
>  ; SI-LABEL: @trunc_i64_to_i32_store
> -; SI: S_LOAD_DWORD SGPR0, SGPR0_SGPR1, 11
> -; SI: V_MOV_B32_e32 VGPR0, SGPR0
> -; SI: BUFFER_STORE_DWORD VGPR0
> +; SI: S_LOAD_DWORD s0, s[0:1], 11
> +; SI: V_MOV_B32_e32 v0, s0
> +; SI: BUFFER_STORE_DWORD v0
>  
>  ; EG-LABEL: @trunc_i64_to_i32_store
>  ; EG: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
> @@ -19,9 +18,9 @@
>  
>  ; SI-LABEL: @trunc_shl_i64:
>  ; SI: S_LOAD_DWORDX2
> -; SI: S_LOAD_DWORDX2 [[SREG:SGPR[0-9]+_SGPR[0-9]+]]
> -; SI: V_LSHL_B64 [[LO_VREG:VGPR[0-9]+]]_VGPR{{[0-9]+}}, [[SREG]], 2
> -; SI: BUFFER_STORE_DWORD [[LO_VREG]],
> +; SI: S_LOAD_DWORDX2 [[SREG:s\[[0-9]+:[0-9]+\]]]
> +; SI: V_LSHL_B64 v{{\[}}[[LO_VREG:[0-9]+]]:{{[0-9]+\]}}, [[SREG]], 2
> +; SI: BUFFER_STORE_DWORD v[[LO_VREG]],
>  define void @trunc_shl_i64(i32 addrspace(1)* %out, i64 %a) {
>    %b = shl i64 %a, 2
>    %result = trunc i64 %b to i32
> Index: test/CodeGen/R600/unaligned-load-store.ll
> ===================================================================
> --- test/CodeGen/R600/unaligned-load-store.ll
> +++ test/CodeGen/R600/unaligned-load-store.ll
> @@ -1,8 +1,8 @@
>  ; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
>  
>  ; SI-LABEL: @unaligned_load_store_i32:
> -; SI: V_ADD_I32_e64 [[REG:VGPR[0-9]+]]
> -; DS_READ_U8 {{VGPR[0-9]+}}, 0, [[REG]]
> +; SI: V_ADD_I32_e64 [[REG:v[0-9]+]]
> +; DS_READ_U8 {{v[0-9]+}}, 0, [[REG]]
>  define void @unaligned_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(3)* %r) nounwind {
>    %v = load i32 addrspace(3)* %p, align 1
>    store i32 %v, i32 addrspace(3)* %r, align 1
> @@ -10,8 +10,8 @@
>  }
>  
>  ; SI-LABEL: @unaligned_load_store_v4i32:
> -; SI: V_ADD_I32_e64 [[REG:VGPR[0-9]+]]
> -; DS_READ_U8 {{VGPR[0-9]+}}, 0, [[REG]]
> +; SI: V_ADD_I32_e64 [[REG:v[0-9]+]]
> +; DS_READ_U8 {{v[0-9]+}}, 0, [[REG]]
>  define void @unaligned_load_store_v4i32(<4 x i32> addrspace(3)* %p, <4 x i32> addrspace(3)* %r) nounwind {
>    %v = load <4 x i32> addrspace(3)* %p, align 1
>    store <4 x i32> %v, <4 x i32> addrspace(3)* %r, align 1
> Index: test/CodeGen/R600/work-item-intrinsics.ll
> ===================================================================
> --- test/CodeGen/R600/work-item-intrinsics.ll
> +++ test/CodeGen/R600/work-item-intrinsics.ll
> @@ -5,8 +5,8 @@
>  ; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
>  ; R600-CHECK: MOV [[VAL]], KC0[0].X
>  ; SI-CHECK: @ngroups_x
> -; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 0
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
> +; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 0
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @ngroups_x (i32 addrspace(1)* %out) {
>  entry:
> @@ -19,8 +19,8 @@
>  ; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
>  ; R600-CHECK: MOV [[VAL]], KC0[0].Y
>  ; SI-CHECK: @ngroups_y
> -; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 1
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
> +; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 1
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @ngroups_y (i32 addrspace(1)* %out) {
>  entry:
> @@ -33,8 +33,8 @@
>  ; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
>  ; R600-CHECK: MOV [[VAL]], KC0[0].Z
>  ; SI-CHECK: @ngroups_z
> -; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 2
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
> +; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 2
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @ngroups_z (i32 addrspace(1)* %out) {
>  entry:
> @@ -47,8 +47,8 @@
>  ; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
>  ; R600-CHECK: MOV [[VAL]], KC0[0].W
>  ; SI-CHECK: @global_size_x
> -; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 3
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
> +; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 3
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @global_size_x (i32 addrspace(1)* %out) {
>  entry:
> @@ -61,8 +61,8 @@
>  ; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
>  ; R600-CHECK: MOV [[VAL]], KC0[1].X
>  ; SI-CHECK: @global_size_y
> -; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 4
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
> +; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 4
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @global_size_y (i32 addrspace(1)* %out) {
>  entry:
> @@ -75,8 +75,8 @@
>  ; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
>  ; R600-CHECK: MOV [[VAL]], KC0[1].Y
>  ; SI-CHECK: @global_size_z
> -; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 5
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
> +; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 5
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @global_size_z (i32 addrspace(1)* %out) {
>  entry:
> @@ -89,8 +89,8 @@
>  ; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
>  ; R600-CHECK: MOV [[VAL]], KC0[1].Z
>  ; SI-CHECK: @local_size_x
> -; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 6
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
> +; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 6
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @local_size_x (i32 addrspace(1)* %out) {
>  entry:
> @@ -103,8 +103,8 @@
>  ; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
>  ; R600-CHECK: MOV [[VAL]], KC0[1].W
>  ; SI-CHECK: @local_size_y
> -; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 7
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
> +; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 7
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @local_size_y (i32 addrspace(1)* %out) {
>  entry:
> @@ -117,8 +117,8 @@
>  ; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW [[VAL:T[0-9]+\.X]]
>  ; R600-CHECK: MOV [[VAL]], KC0[2].X
>  ; SI-CHECK: @local_size_z
> -; SI-CHECK: S_LOAD_DWORD [[VAL:SGPR[0-9]+]], SGPR0_SGPR1, 8
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], [[VAL]]
> +; SI-CHECK: S_LOAD_DWORD [[VAL:s[0-9]+]], s[0:1], 8
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], [[VAL]]
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @local_size_z (i32 addrspace(1)* %out) {
>  entry:
> @@ -127,12 +127,12 @@
>    ret void
>  }
>  
> -; The tgid values are stored in SGPRs offset by the number of user SGPRs.
> -; Currently we always use exactly 2 user SGPRs for the pointer to the
> +; The tgid values are stored in SGPRs offset by the number of user SGPRs.
> +; Currently we always use exactly 2 user SGPRs for the pointer to the
>  ; kernel arguments, but this may change in the future.
>  
>  ; SI-CHECK: @tgid_x
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], SGPR2
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s2
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @tgid_x (i32 addrspace(1)* %out) {
>  entry:
> @@ -142,7 +142,7 @@
>  }
>  
>  ; SI-CHECK: @tgid_y
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], SGPR3
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s3
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @tgid_y (i32 addrspace(1)* %out) {
>  entry:
> @@ -152,7 +152,7 @@
>  }
>  
>  ; SI-CHECK: @tgid_z
> -; SI-CHECK: V_MOV_B32_e32 [[VVAL:VGPR[0-9]+]], SGPR4
> +; SI-CHECK: V_MOV_B32_e32 [[VVAL:v[0-9]+]], s4
>  ; SI-CHECK: BUFFER_STORE_DWORD [[VVAL]]
>  define void @tgid_z (i32 addrspace(1)* %out) {
>  entry:
> @@ -162,7 +162,7 @@
>  }
>  
>  ; SI-CHECK: @tidig_x
> -; SI-CHECK: BUFFER_STORE_DWORD VGPR0
> +; SI-CHECK: BUFFER_STORE_DWORD v0
>  define void @tidig_x (i32 addrspace(1)* %out) {
>  entry:
>    %0 = call i32 @llvm.r600.read.tidig.x() #0
> @@ -171,7 +171,7 @@
>  }
>  
>  ; SI-CHECK: @tidig_y
> -; SI-CHECK: BUFFER_STORE_DWORD VGPR1
> +; SI-CHECK: BUFFER_STORE_DWORD v1
>  define void @tidig_y (i32 addrspace(1)* %out) {
>  entry:
>    %0 = call i32 @llvm.r600.read.tidig.y() #0
> @@ -180,7 +180,7 @@
>  }
>  
>  ; SI-CHECK: @tidig_z
> -; SI-CHECK: BUFFER_STORE_DWORD VGPR2
> +; SI-CHECK: BUFFER_STORE_DWORD v2
>  define void @tidig_z (i32 addrspace(1)* %out) {
>  entry:
>    %0 = call i32 @llvm.r600.read.tidig.z() #0
> Index: test/CodeGen/R600/xor.ll
> ===================================================================
> --- test/CodeGen/R600/xor.ll
> +++ test/CodeGen/R600/xor.ll
> @@ -6,8 +6,8 @@
>  ;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @xor_v2i32
> -;SI-CHECK: V_XOR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_XOR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_XOR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_XOR_B32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  
>  define void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) {
> @@ -25,10 +25,10 @@
>  ;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
>  
>  ;SI-CHECK: @xor_v4i32
> -;SI-CHECK: V_XOR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_XOR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_XOR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> -;SI-CHECK: V_XOR_B32_e32 VGPR{{[0-9]+, VGPR[0-9]+, VGPR[0-9]+}}
> +;SI-CHECK: V_XOR_B32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_XOR_B32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_XOR_B32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
> +;SI-CHECK: V_XOR_B32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
>  
>  define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
>    %a = load <4 x i32> addrspace(1) * %in0
> @@ -42,7 +42,7 @@
>  ;EG-CHECK: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
>  
>  ;SI-CHECK: @xor_i1
> -;SI-CHECK: S_XOR_B64 {{SGPR[0-9]+_SGPR[0-9]+, SGPR[0-9]+_SGPR[0-9]+, SGPR[0-9]+_SGPR[0-9]+}}
> +;SI-CHECK: S_XOR_B64 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
>  
>  define void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
>    %a = load float addrspace(1) * %in0
> Index: test/CodeGen/R600/zero_extend.ll
> ===================================================================
> --- test/CodeGen/R600/zero_extend.ll
> +++ test/CodeGen/R600/zero_extend.ll
> @@ -6,8 +6,8 @@
>  ; R600-CHECK: MEM_RAT_CACHELESS STORE_RAW
>  
>  ; SI-CHECK: @test
> -; SI-CHECK: V_MOV_B32_e32 [[ZERO:VGPR[0-9]]], 0
> -; SI-CHECK: BUFFER_STORE_DWORDX2 VGPR0_[[ZERO]]
> +; SI-CHECK: V_MOV_B32_e32 v[[ZERO:[0-9]]], 0
> +; SI-CHECK: BUFFER_STORE_DWORDX2 v[0:[[ZERO]]{{\]}}
>  define void @test(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
>  entry:
>    %0 = mul i32 %a, %b
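
One stylistic note on the regexes, just for future test updates (this example is not part of the patch; the function name is made up): the updated checks match the bracketed ranges in two equivalent ways, either escaping the brackets inside the {{...}} block or keeping the brackets literal and putting only the digits inside the regex. A minimal sketch, assuming a verde run line like the ones already in the tree:

  ; RUN: llc < %s -march=r600 -mcpu=verde -verify-machineinstrs | FileCheck %s

  ; Style 1: brackets escaped inside the regex block.
  ; CHECK: S_LOAD_DWORDX2 {{s\[[0-9]+:[0-9]+\]}}
  ; Style 2: brackets kept literal, only the digits are a regex.
  ; CHECK: BUFFER_LOAD_DWORDX4 v[{{[0-9]+:[0-9]+}}]
  define void @range_syntax_example(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
    %a = load <4 x i32> addrspace(1)* %in
    store <4 x i32> %a, <4 x i32> addrspace(1)* %out
    ret void
  }

Either spelling should work; picking one style per file would keep the patterns a bit easier to read.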

> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits