[llvm] 039c917 - AMDGPU/GlobalISel: Fix asserting on gather4 intrinsics

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 17 08:07:48 PDT 2020


Author: Matt Arsenault
Date: 2020-03-17T11:07:30-04:00
New Revision: 039c917b43bb1e3f7365bd88e492a98e3a31ddc7

URL: https://github.com/llvm/llvm-project/commit/039c917b43bb1e3f7365bd88e492a98e3a31ddc7
DIFF: https://github.com/llvm/llvm-project/commit/039c917b43bb1e3f7365bd88e492a98e3a31ddc7.diff

LOG: AMDGPU/GlobalISel: Fix asserting on gather4 intrinsics
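
The a16 packing helper previously started reading operands at DimIdx (the
first coordinate) while still counting all NumVAddrs address operands, so
gather4 variants whose bias/zcompare arguments precede the coordinates
walked past the s16 coordinates and tripped the s16 type assert. Pass the
index of the first address operand (AddrIdx) separately, and bitcast the
dword-sized leading arguments to v2s16 instead of trying to pack them.
For example, an a16 gather with an f32 depth-compare value ahead of the
f16 coordinates (taken verbatim from the new tests):

  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.2d.v4f32.f16(i32 1, float %zcompare, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)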

Added: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.a16.dim.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.dim.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.o.dim.ll

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 6c50b6df4140..5141ef3d5e1e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -3376,30 +3376,36 @@ bool AMDGPULegalizerInfo::legalizeBufferAtomic(MachineInstr &MI,
 
 /// Turn a set of s16 typed registers in \p A16AddrRegs into a dword sized
 /// vector with s16 typed elements.
-static void packImageA16AddressToDwords(MachineIRBuilder &B,
-                                        MachineInstr &MI,
+static void packImageA16AddressToDwords(MachineIRBuilder &B, MachineInstr &MI,
                                         SmallVectorImpl<Register> &PackedAddrs,
-                                        int DimIdx,
-                                        int NumVAddrs) {
+                                        int AddrIdx, int DimIdx, int NumVAddrs,
+                                        int NumGradients) {
   const LLT S16 = LLT::scalar(16);
   const LLT V2S16 = LLT::vector(2, 16);
 
-  SmallVector<Register, 8> A16AddrRegs;
-  A16AddrRegs.resize(NumVAddrs);
+  for (int I = AddrIdx; I < AddrIdx + NumVAddrs; ++I) {
+    Register AddrReg = MI.getOperand(I).getReg();
 
-  for (int I = 0; I != NumVAddrs; ++I) {
-    A16AddrRegs[I] = MI.getOperand(DimIdx + I).getReg();
-    assert(B.getMRI()->getType(A16AddrRegs[I]) == S16);
-  }
-
-  // Round to dword.
-  if (NumVAddrs % 2 != 0)
-    A16AddrRegs.push_back(B.buildUndef(S16).getReg(0));
-
-  PackedAddrs.resize(A16AddrRegs.size() / 2);
-  for (int I = 0, E = PackedAddrs.size(); I != E; ++I) {
-    PackedAddrs[I] = B.buildBuildVector(
-      V2S16, {A16AddrRegs[2 * I], A16AddrRegs[2 * I + 1]}).getReg(0);
+    if (I < DimIdx) {
+      AddrReg = B.buildBitcast(V2S16, AddrReg).getReg(0);
+      PackedAddrs.push_back(AddrReg);
+    } else {
+      // Dz/dh, dz/dv and the last odd coord are packed with undef. Also, in 1D,
+      // derivatives dx/dh and dx/dv are packed with undef.
+      if (((I + 1) >= (AddrIdx + NumVAddrs)) ||
+          ((NumGradients / 2) % 2 == 1 &&
+           (I == DimIdx + (NumGradients / 2) - 1 ||
+            I == DimIdx + NumGradients - 1))) {
+        PackedAddrs.push_back(
+            B.buildBuildVector(V2S16, {AddrReg, B.buildUndef(S16).getReg(0)})
+                .getReg(0));
+      } else {
+        PackedAddrs.push_back(
+            B.buildBuildVector(V2S16, {AddrReg, MI.getOperand(I + 1).getReg()})
+                .getReg(0));
+        ++I;
+      }
+    }
   }
 }
 
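(A minimal standalone C++ sketch of just the index walk above -- plain
printf rather than the MachineIRBuilder API, and assuming operands in
[AddrIdx, DimIdx) are dword-sized extra arguments while the rest are s16:)

    #include <cstdio>

    static void packWalk(int AddrIdx, int DimIdx, int NumVAddrs,
                         int NumGradients) {
      for (int I = AddrIdx; I < AddrIdx + NumVAddrs; ++I) {
        if (I < DimIdx) {
          // Dword-sized extra argument: just bitcast s32 -> v2s16.
          printf("op %d: bitcast to v2s16\n", I);
        } else if ((I + 1) >= (AddrIdx + NumVAddrs) ||
                   ((NumGradients / 2) % 2 == 1 &&
                    (I == DimIdx + (NumGradients / 2) - 1 ||
                     I == DimIdx + NumGradients - 1))) {
          // Last odd coordinate, or end of an odd gradient group: pad.
          printf("op %d: pack {op%d, undef}\n", I, I);
        } else {
          // Two consecutive s16 operands share one dword.
          printf("op %d: pack {op%d, op%d}\n", I, I, I + 1);
          ++I;
        }
      }
    }

    int main() {
      // gather4_c_cl_2d: zcompare extra arg, then s, t, clamp.
      packWalk(/*AddrIdx=*/0, /*DimIdx=*/1, /*NumVAddrs=*/4,
               /*NumGradients=*/0);
    }

For that case this prints a bitcast for the zcompare, one {s, t} pair and
a final {clamp, undef} pair, matching the gather4_c_cl_2d GFX9 checks in
the new tests.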
@@ -3419,15 +3425,18 @@ static void convertImageAddrToPacked(MachineIRBuilder &B, MachineInstr &MI,
     MI.getOperand(DimIdx + I).setReg(AMDGPU::NoRegister);
 }
 
-static int getImageNumVAddr(const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr,
-                            const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode) {
+/// Return number of address arguments, and the number of gradients
+static std::pair<int, int>
+getImageNumVAddr(const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr,
+                 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode) {
   const AMDGPU::MIMGDimInfo *DimInfo
     = AMDGPU::getMIMGDimInfo(ImageDimIntr->Dim);
 
   int NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
   int NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
   int NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
-  return BaseOpcode->NumExtraArgs + NumGradients + NumCoords + NumLCM;
+  int NumVAddr = BaseOpcode->NumExtraArgs + NumGradients + NumCoords + NumLCM;
+  return {NumVAddr, NumGradients};
 }
 
 static int getDMaskIdx(const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode,
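
(A worked example of the returned pair, using gather4.c.cl.2d from the
new tests: the zcompare value is the single extra argument, gather4 takes
no gradients, a 2D image has two coordinates, and the clamp is the
LodOrClampOrMip argument, so NumVAddrs = 1 + 0 + 2 + 1 = 4 and
NumGradients = 0 -- matching the four VGPR address operands in the
gather4_c_cl_2d checks below.)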
@@ -3495,7 +3504,8 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
   LLT AddrTy = MRI->getType(MI.getOperand(DimIdx).getReg());
   const bool IsA16 = AddrTy == S16;
 
-  const int NumVAddrs = getImageNumVAddr(ImageDimIntr, BaseOpcode);
+  int NumVAddrs, NumGradients;
+  std::tie(NumVAddrs, NumGradients) = getImageNumVAddr(ImageDimIntr, BaseOpcode);
 
   // If the register allocator cannot place the address registers contiguously
   // without introducing moves, then using the non-sequential address encoding
@@ -3522,7 +3532,8 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
 
     if (NumVAddrs > 1) {
       SmallVector<Register, 4> PackedRegs;
-      packImageA16AddressToDwords(B, MI, PackedRegs, DimIdx, NumVAddrs);
+      packImageA16AddressToDwords(B, MI, PackedRegs, AddrIdx, DimIdx, NumVAddrs,
+                                  NumGradients);
 
       if (!UseNSA && PackedRegs.size() > 1) {
         LLT PackedAddrTy = LLT::vector(2 * PackedRegs.size(), 16);
@@ -3533,16 +3544,16 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
 
       const int NumPacked = PackedRegs.size();
       for (int I = 0; I != NumVAddrs; ++I) {
-        assert(MI.getOperand(DimIdx + I).getReg() != AMDGPU::NoRegister);
+        assert(MI.getOperand(AddrIdx + I).getReg() != AMDGPU::NoRegister);
 
         if (I < NumPacked)
-          MI.getOperand(DimIdx + I).setReg(PackedRegs[I]);
+          MI.getOperand(AddrIdx + I).setReg(PackedRegs[I]);
         else
-          MI.getOperand(DimIdx + I).setReg(AMDGPU::NoRegister);
+          MI.getOperand(AddrIdx + I).setReg(AMDGPU::NoRegister);
       }
     }
   } else if (!UseNSA && NumVAddrs > 1) {
-    convertImageAddrToPacked(B, MI, DimIdx, NumVAddrs);
+    convertImageAddrToPacked(B, MI, AddrIdx, NumVAddrs);
   }
 
   int DMaskLanes = 0;

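For reference when reading the checks below: the G_AMDGPU_INTRIN_IMAGE_LOAD
operands are laid out roughly as

    dst = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic-id, dmask,
          <NumVAddrs address operands>, rsrc, samp, <three immediates>

with AddrIdx pointing at the first address operand (extra arguments such
as bias or zcompare come first) and DimIdx at the first coordinate. Once
the addresses are packed or merged, the now-unused trailing address slots
are set to $noreg, which is why the non-NSA GFX9 lines show a single
concatenated vector followed by $noreg padding.
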
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.a16.dim.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.a16.dim.ll
new file mode 100644
index 000000000000..d798e2687fd4
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.a16.dim.ll
@@ -0,0 +1,1032 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10NSA %s
+
+define amdgpu_ps <4 x float> @gather4_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t) {
+  ; GFX9-LABEL: name: gather4_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2d), 1, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2d), 1, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.2d.v4f32.f16(i32 1, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_cube(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %face) {
+  ; GFX9-LABEL: name: gather4_cube
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cube), 1, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_cube
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10NSA:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cube), 1, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.cube.v4f32.f16(i32 1, half %s, half %t, half %face, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_2darray(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %slice) {
+  ; GFX9-LABEL: name: gather4_2darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2darray), 1, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_2darray
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10NSA:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2darray), 1, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.2darray.v4f32.f16(i32 1, half %s, half %t, half %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t) {
+  ; GFX9-LABEL: name: gather4_c_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.2d), 1, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.2d), 1, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.2d.v4f32.f16(i32 1, float %zcompare, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %clamp) {
+  ; GFX9-LABEL: name: gather4_cl_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cl.2d), 1, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_cl_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10NSA:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cl.2d), 1, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.cl.2d.v4f32.f16(i32 1, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t, half %clamp) {
+  ; GFX9-LABEL: name: gather4_c_cl_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX9:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY18]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.cl.2d), 1, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_cl_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX10NSA:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX10NSA:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY18]](s32), [[DEF]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.cl.2d), 1, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.cl.2d.v4f32.f16(i32 1, float %zcompare, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_b_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %bias, half %s, half %t) {
+  ; GFX9-LABEL: name: gather4_b_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.2d), 1, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_b_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.2d), 1, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.b.2d.v4f32.f32.f16(i32 1, float %bias, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_b_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %bias, float %zcompare, half %s, half %t) {
+  ; GFX9-LABEL: name: gather4_c_b_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.2d), 1, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_b_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10NSA:   [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.2d), 1, [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.b.2d.v4f32.f32.f16(i32 1, float %bias, float %zcompare, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %bias, half %s, half %t, half %clamp) {
+  ; GFX9-LABEL: name: gather4_b_cl_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX9:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY18]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.cl.2d), 1, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_b_cl_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX10NSA:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX10NSA:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY18]](s32), [[DEF]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.cl.2d), 1, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.b.cl.2d.v4f32.f32.f16(i32 1, float %bias, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %bias, float %zcompare, half %s, half %t, half %clamp) {
+  ; GFX9-LABEL: name: gather4_c_b_cl_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[COPY18]](s32)
+  ; GFX9:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY16]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY19]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.cl.2d), 1, [[CONCAT_VECTORS]](<8 x s16>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_b_cl_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10NSA:   [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
+  ; GFX10NSA:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[COPY18]](s32)
+  ; GFX10NSA:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY16]](s32)
+  ; GFX10NSA:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY19]](s32), [[DEF]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.cl.2d), 1, [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.2d.v4f32.f32.f16(i32 1, float %bias, float %zcompare, half %s, half %t, half %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %lod) {
+  ; GFX9-LABEL: name: gather4_l_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.2d), 1, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_l_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10NSA:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.2d), 1, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f16(i32 1, half %s, half %t, half %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t, half %lod) {
+  ; GFX9-LABEL: name: gather4_c_l_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX9:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY18]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.2d), 1, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_l_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX10NSA:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX10NSA:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY18]](s32), [[DEF]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.2d), 1, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f16(i32 1, float %zcompare, half %s, half %t, half %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_lz_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t) {
+  ; GFX9-LABEL: name: gather4_lz_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.lz.2d), 1, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_lz_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.lz.2d), 1, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.lz.2d.v4f32.f16(i32 1, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_lz_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t) {
+  ; GFX9-LABEL: name: gather4_c_lz_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.lz.2d), 1, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_lz_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.lz.2d), 1, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.lz.2d.v4f32.f16(i32 1, float %zcompare, half %s, half %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+declare <4 x float> @llvm.amdgcn.image.gather4.2d.v4f32.f16(i32 immarg, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.cube.v4f32.f16(i32 immarg, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.2darray.v4f32.f16(i32 immarg, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.cl.2d.v4f32.f16(i32 immarg, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.b.2d.v4f32.f32.f16(i32 immarg, float, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.b.2d.v4f32.f32.f16(i32 immarg, float, float, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.b.cl.2d.v4f32.f32.f16(i32 immarg, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.2d.v4f32.f32.f16(i32 immarg, float, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f16(i32 immarg, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.lz.2d.v4f32.f16(i32 immarg, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.2d.v4f32.f16(i32 immarg, float, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.cl.2d.v4f32.f16(i32 immarg, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f16(i32 immarg, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.lz.2d.v4f32.f16(i32 immarg, float, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+
+attributes #0 = { nounwind readonly }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.dim.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.dim.ll
new file mode 100644
index 000000000000..9e9407f005c6
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.dim.ll
@@ -0,0 +1,1064 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX6 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10NSA %s
+
+define amdgpu_ps <4 x float> @gather4_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2d), 1, [[BUILD_VECTOR2]](<2 x s32>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2d), 1, [[BUILD_VECTOR2]](<2 x s32>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.2d.v4f32.f32(i32 1, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_cube(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t, float %face) {
+  ; GFX6-LABEL: name: gather4_cube
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cube), 1, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_cube
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cube), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.cube.v4f32.f32(i32 1, float %s, float %t, float %face, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_2darray(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t, float %slice) {
+  ; GFX6-LABEL: name: gather4_2darray
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2darray), 1, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_2darray
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2darray), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.2darray.v4f32.f32(i32 1, float %s, float %t, float %slice, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_c_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.2d), 1, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.2d.v4f32.f32(i32 1, float %zcompare, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t, float %clamp) {
+  ; GFX6-LABEL: name: gather4_cl_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cl.2d), 1, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_cl_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cl.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.cl.2d.v4f32.f32(i32 1, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %t, float %clamp) {
+  ; GFX6-LABEL: name: gather4_c_cl_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.cl.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_cl_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.cl.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.cl.2d.v4f32.f32(i32 1, float %zcompare, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_b_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %bias, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_b_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.2d), 1, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_b_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.b.2d.v4f32.f32.f32(i32 1, float %bias, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_b_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %bias, float %zcompare, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_c_b_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_b_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.b.2d.v4f32.f32.f32(i32 1, float %bias, float %zcompare, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %bias, float %s, float %t, float %clamp) {
+  ; GFX6-LABEL: name: gather4_b_cl_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.cl.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_b_cl_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.cl.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.b.cl.2d.v4f32.f32.f32(i32 1, float %bias, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_b_cl_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %bias, float %zcompare, float %s, float %t, float %clamp) {
+  ; GFX6-LABEL: name: gather4_c_b_cl_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.cl.2d), 1, [[BUILD_VECTOR2]](<5 x s32>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_b_cl_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.cl.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.2d.v4f32.f32.f32(i32 1, float %bias, float %zcompare, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t, float %lod) {
+  ; GFX6-LABEL: name: gather4_l_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.2d), 1, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_l_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f32(i32 1, float %s, float %t, float %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %t, float %lod) {
+  ; GFX6-LABEL: name: gather4_c_l_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_l_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f32(i32 1, float %zcompare, float %s, float %t, float %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_lz_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_lz_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.lz.2d), 1, [[BUILD_VECTOR2]](<2 x s32>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_lz_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.lz.2d), 1, [[BUILD_VECTOR2]](<2 x s32>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.lz.2d.v4f32.f32(i32 1, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_lz_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_c_lz_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.lz.2d), 1, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_c_lz_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.lz.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.lz.2d.v4f32.f32(i32 1, float %zcompare, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
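+; gather4 fetches a single component from each of four texels, so only
+; single-bit dmask values make sense here; the tests below cover dmask
+; values 2, 4, and 8 in addition to the dmask of 1 used above.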
+define amdgpu_ps <4 x float> @gather4_2d_dmask_2(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_2d_dmask_2
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2d), 2, [[BUILD_VECTOR2]](<2 x s32>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_2d_dmask_2
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2d), 2, [[BUILD_VECTOR2]](<2 x s32>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.2d.v4f32.f32(i32 2, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_2d_dmask_4(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_2d_dmask_4
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2d), 4, [[BUILD_VECTOR2]](<2 x s32>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_2d_dmask_4
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2d), 4, [[BUILD_VECTOR2]](<2 x s32>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.2d.v4f32.f32(i32 4, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_2d_dmask_8(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_2d_dmask_8
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2d), 8, [[BUILD_VECTOR2]](<2 x s32>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: gather4_2d_dmask_8
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX10NSA:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10NSA:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10NSA:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10NSA:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10NSA:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.2d), 8, [[BUILD_VECTOR2]](<2 x s32>), $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10NSA:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.2d.v4f32.f32(i32 8, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+declare <4 x float> @llvm.amdgcn.image.gather4.2d.v4f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.cube.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.2darray.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.2d.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.cl.2d.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.cl.2d.v4f32.f32(i32 immarg, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.b.2d.v4f32.f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.b.2d.v4f32.f32.f32(i32 immarg, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.b.cl.2d.v4f32.f32.f32(i32 immarg, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.2d.v4f32.f32.f32(i32 immarg, float, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f32(i32 immarg, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.lz.2d.v4f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.lz.2d.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+
+attributes #0 = { nounwind readonly }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.o.dim.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.o.dim.ll
new file mode 100644
index 000000000000..5338f1deb8e2
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.o.dim.ll
@@ -0,0 +1,782 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX6 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
+
+define amdgpu_ps <4 x float> @gather4_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.o.2d), 1, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.o.2d.v4f32.f32(i32 1, i32 %offset, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_c_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.o.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_c_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.o.2d.v4f32.f32(i32 1, i32 %offset, float %zcompare, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_cl_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t, float %clamp) {
+  ; GFX6-LABEL: name: gather4_cl_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cl.o.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_cl_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.cl.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.cl.o.2d.v4f32.f32(i32 1, i32 %offset, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_cl_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t, float %clamp) {
+  ; GFX6-LABEL: name: gather4_c_cl_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.cl.o.2d), 1, [[BUILD_VECTOR2]](<5 x s32>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_c_cl_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.cl.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.cl.o.2d.v4f32.f32(i32 1, i32 %offset, float %zcompare, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_b_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %bias, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_b_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.o.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_b_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.b.o.2d.v4f32.f32.f32(i32 1, i32 %offset, float %bias, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_b_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %bias, float %zcompare, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_c_b_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.o.2d), 1, [[BUILD_VECTOR2]](<5 x s32>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_c_b_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.b.o.2d.v4f32.f32.f32(i32 1, i32 %offset, float %bias, float %zcompare, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_b_cl_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %bias, float %s, float %t, float %clamp) {
+  ; GFX6-LABEL: name: gather4_b_cl_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.cl.o.2d), 1, [[BUILD_VECTOR2]](<5 x s32>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_b_cl_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.b.cl.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.b.cl.o.2d.v4f32.f32.f32(i32 1, i32 %offset, float %bias, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_b_cl_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %bias, float %zcompare, float %s, float %t, float %clamp) {
+  ; GFX6-LABEL: name: gather4_c_b_cl_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX6:   [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<6 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.cl.o.2d), 1, [[BUILD_VECTOR2]](<6 x s32>), $noreg, $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_c_b_cl_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.b.cl.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.o.2d.v4f32.f32.f32(i32 1, i32 %offset, float %bias, float %zcompare, float %s, float %t, float %clamp, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t, float %lod) {
+  ; GFX6-LABEL: name: gather4_l_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.o.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_l_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f32(i32 1, i32 %offset, float %s, float %t, float %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t, float %lod) {
+  ; GFX6-LABEL: name: gather4_c_l_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.o.2d), 1, [[BUILD_VECTOR2]](<5 x s32>), $noreg, $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_c_l_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f32(i32 1, i32 %offset, float %zcompare, float %s, float %t, float %lod, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_lz_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_lz_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.lz.o.2d), 1, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_lz_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.lz.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.lz.o.2d.v4f32.f32(i32 1, i32 %offset, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_lz_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t) {
+  ; GFX6-LABEL: name: gather4_c_lz_o_2d
+  ; GFX6: bb.1.main_body:
+  ; GFX6:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX6:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX6:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX6:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX6:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX6:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX6:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX6:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX6:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX6:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX6:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX6:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX6:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX6:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX6:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX6:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX6:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX6:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX6:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX6:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX6:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.lz.o.2d), 1, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX6:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX6:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX6:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX6:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX6:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX6:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_c_lz_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.lz.o.2d), 1, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.lz.o.2d.v4f32.f32(i32 1, i32 %offset, float %zcompare, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+declare <4 x float> @llvm.amdgcn.image.gather4.o.2d.v4f32.f32(i32 immarg, i32, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.cl.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.cl.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.b.o.2d.v4f32.f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.b.o.2d.v4f32.f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.b.cl.o.2d.v4f32.f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.o.2d.v4f32.f32.f32(i32 immarg, i32, float, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.lz.o.2d.v4f32.f32(i32 immarg, i32, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.lz.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+
+attributes #0 = { nounwind readonly }
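
For reference, a minimal standalone usage sketch of the first declaration above (not part of the commit; the function name @example_gather4_o_2d is made up for illustration). It gathers the red component (dmask = i32 1) from a 2D image with a packed texel offset, mirroring the call pattern the tests use, with TFE/LWE disabled and default unorm/cache bits:

    ; Assumes the declaration of @llvm.amdgcn.image.gather4.o.2d.v4f32.f32 above.
    define amdgpu_ps <4 x float> @example_gather4_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t) {
    main_body:
      ; dmask=1 selects the red channel; i1 false = no TFE/LWE; trailing i32 0, i32 0 = unorm/glc-slc defaults.
      %v = call <4 x float> @llvm.amdgcn.image.gather4.o.2d.v4f32.f32(i32 1, i32 %offset, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
      ret <4 x float> %v
    }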