[llvm] 42d5609 - AMDGPU/GlobalISel: Start handling _L to _LZ optimization

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 30 14:02:40 PDT 2020


Author: Matt Arsenault
Date: 2020-03-30T17:02:30-04:00
New Revision: 42d5609809836aceff41f78ab652a882d4982260

URL: https://github.com/llvm/llvm-project/commit/42d5609809836aceff41f78ab652a882d4982260
DIFF: https://github.com/llvm/llvm-project/commit/42d5609809836aceff41f78ab652a882d4982260.diff

LOG: AMDGPU/GlobalISel: Start handling _L to _LZ optimization

We currently don't have a way to map an image intrinsic back to the
equivalent _LZ intrinsic opcode, so instead track an immediate 0 in place
of the eliminated LOD address operand; selection uses that marker to know
to change the final opcode.
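
For illustration (a hand-written IR sketch in the spirit of the new ltolz
tests below, not copied from them), a sample call whose LOD is a constant
zero can now select to image_sample_lz instead of image_sample_l:

    define amdgpu_ps <4 x float> @sample_l_zero(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s) {
      ; LOD is the constant 0.0, so the _L form carries a redundant address register.
      %v = call <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f32(i32 15, float %s, float 0.0, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
      ret <4 x float> %v
    }

    declare <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg)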

Added: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/image_ls_mipmap_zero.a16.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/image_ls_mipmap_zero.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.a16.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.ll

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 19a2295fc2e6..08e785562bbb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -3551,7 +3551,11 @@ static void packImageA16AddressToDwords(MachineIRBuilder &B, MachineInstr &MI,
   const LLT V2S16 = LLT::vector(2, 16);
 
   for (int I = AddrIdx; I < AddrIdx + NumVAddrs; ++I) {
-    Register AddrReg = MI.getOperand(I).getReg();
+    MachineOperand &SrcOp = MI.getOperand(I);
+    if (!SrcOp.isReg())
+      continue; // _L to _LZ may have eliminated this.
+
+    Register AddrReg = SrcOp.getReg();
 
     if (I < DimIdx) {
       AddrReg = B.buildBitcast(V2S16, AddrReg).getReg(0);
@@ -3562,7 +3566,9 @@ static void packImageA16AddressToDwords(MachineIRBuilder &B, MachineInstr &MI,
       if (((I + 1) >= (AddrIdx + NumVAddrs)) ||
           ((NumGradients / 2) % 2 == 1 &&
            (I == DimIdx + (NumGradients / 2) - 1 ||
-            I == DimIdx + NumGradients - 1))) {
+            I == DimIdx + NumGradients - 1)) ||
+          // Check for _L to _LZ optimization
+          !MI.getOperand(I + 1).isReg()) {
         PackedAddrs.push_back(
             B.buildBuildVector(V2S16, {AddrReg, B.buildUndef(S16).getReg(0)})
                 .getReg(0));
@@ -3580,16 +3586,27 @@ static void packImageA16AddressToDwords(MachineIRBuilder &B, MachineInstr &MI,
 /// and replace the remaining operands with $noreg.
 static void convertImageAddrToPacked(MachineIRBuilder &B, MachineInstr &MI,
                                      int DimIdx, int NumVAddrs) {
-  SmallVector<Register, 8> AddrRegs(NumVAddrs);
+  const LLT S32 = LLT::scalar(32);
+
+  SmallVector<Register, 8> AddrRegs;
   for (int I = 0; I != NumVAddrs; ++I) {
-    AddrRegs[I] = MI.getOperand(DimIdx + I).getReg();
-    assert(B.getMRI()->getType(AddrRegs[I]) == LLT::scalar(32));
+    MachineOperand &SrcOp = MI.getOperand(DimIdx + I);
+    if (SrcOp.isReg()) {
+      AddrRegs.push_back(SrcOp.getReg());
+      assert(B.getMRI()->getType(SrcOp.getReg()) == S32);
+    }
   }
 
-  auto VAddr = B.buildBuildVector(LLT::vector(NumVAddrs, 32), AddrRegs);
-  MI.getOperand(DimIdx).setReg(VAddr.getReg(0));
-  for (int I = 1; I != NumVAddrs; ++I)
-    MI.getOperand(DimIdx + I).setReg(AMDGPU::NoRegister);
+  if (AddrRegs.size() != 1) {
+    auto VAddr = B.buildBuildVector(LLT::vector(AddrRegs.size(), 32), AddrRegs);
+    MI.getOperand(DimIdx).setReg(VAddr.getReg(0));
+  }
+
+  for (int I = 1; I != NumVAddrs; ++I) {
+    MachineOperand &SrcOp = MI.getOperand(DimIdx + I);
+    if (SrcOp.isReg())
+      SrcOp.setReg(AMDGPU::NoRegister);
+  }
 }
 
 /// Return number of address arguments, and the number of gradients
@@ -3699,6 +3716,41 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
     MI.getOperand(DMaskIdx).setImm(DMask);
   }
 
+  int CorrectedNumVAddrs = NumVAddrs;
+
+  // Optimize _L to _LZ when the LOD argument is zero
+  if (const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
+        AMDGPU::getMIMGLZMappingInfo(ImageDimIntr->BaseOpcode)) {
+    const ConstantFP *ConstantLod;
+    const int LodIdx = AddrIdx + NumVAddrs - 1;
+
+    // FIXME: This isn't the cleanest way to handle this, but it's the easiest
+    // option the current infrastructure gives us. We really should be changing
+    // the base intrinsic opcode, but the current searchable tables only give
+    // us the final MI opcode. Eliminate the register here, and track it with
+    // an immediate 0 so the final selection will know to do the opcode change.
+    if (mi_match(MI.getOperand(LodIdx).getReg(), *MRI, m_GFCst(ConstantLod))) {
+      if (ConstantLod->isZero() || ConstantLod->isNegative()) {
+        MI.getOperand(LodIdx).ChangeToImmediate(0);
+        --CorrectedNumVAddrs;
+      }
+    }
+  }
+
+  // Optimize _mip away when 'lod' is zero
+  if (const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
+        AMDGPU::getMIMGMIPMappingInfo(ImageDimIntr->BaseOpcode)) {
+    int64_t ConstantLod;
+    const int LodIdx = AddrIdx + NumVAddrs - 1;
+
+    if (mi_match(MI.getOperand(LodIdx).getReg(), *MRI, m_ICst(ConstantLod))) {
+      if (ConstantLod == 0) {
+        MI.getOperand(LodIdx).ChangeToImmediate(0);
+        --CorrectedNumVAddrs;
+      }
+    }
+  }
+
   // If the register allocator cannot place the address registers contiguously
   // without introducing moves, then using the non-sequential address encoding
   // is always preferable, since it saves VALU instructions and is usually a
@@ -3710,7 +3762,7 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
   //
   // SIShrinkInstructions will convert NSA encodings to non-NSA after register
   // allocation when possible.
-  const bool UseNSA = NumVAddrs >= 3 &&
+  const bool UseNSA = CorrectedNumVAddrs >= 3 &&
                       ST.hasFeature(AMDGPU::FeatureNSAEncoding);
 
   // Rewrite the addressing register layout before doing anything else.
@@ -3734,12 +3786,18 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
 
       const int NumPacked = PackedRegs.size();
       for (int I = 0; I != NumVAddrs; ++I) {
-        assert(MI.getOperand(AddrIdx + I).getReg() != AMDGPU::NoRegister);
+        MachineOperand &SrcOp = MI.getOperand(AddrIdx + I);
+        if (!SrcOp.isReg()) {
+          assert(SrcOp.isImm() && SrcOp.getImm() == 0);
+          continue;
+        }
+
+        assert(SrcOp.getReg() != AMDGPU::NoRegister);
 
         if (I < NumPacked)
-          MI.getOperand(AddrIdx + I).setReg(PackedRegs[I]);
+          SrcOp.setReg(PackedRegs[I]);
         else
-          MI.getOperand(AddrIdx + I).setReg(AMDGPU::NoRegister);
+          SrcOp.setReg(AMDGPU::NoRegister);
       }
     }
   } else if (!UseNSA && NumVAddrs > 1) {
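
(An abridged illustration of the tracking scheme described in the FIXME
above, adapted from the GFX9 checks in the new tests below with register
names shortened: once a constant-zero LOD/mip operand is folded away, the
legalized instruction carries a literal 0 where the address register used
to be, and selection uses that marker to switch to the _LZ / non-mip
opcode.)

    %v:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1d), 15, %vaddr(<2 x s16>), 0, %rsrc(<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")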

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/image_ls_mipmap_zero.a16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/image_ls_mipmap_zero.a16.ll
new file mode 100644
index 000000000000..1475ccf83d2a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/image_ls_mipmap_zero.a16.ll
@@ -0,0 +1,717 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
+
+define amdgpu_ps <4 x float> @load_mip_1d(<8 x i32> inreg %rsrc, i16 %s) {
+  ; GFX9-LABEL: name: load_mip_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[DEF]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_1d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[DEF]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i16(i32 15, i16 %s, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_2d(<8 x i32> inreg %rsrc, i16 %s, i16 %t) {
+  ; GFX9-LABEL: name: load_mip_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i16(i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_3d(<8 x i32> inreg %rsrc, i16 %s, i16 %t, i16 %u) {
+  ; GFX9-LABEL: name: load_mip_3d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_3d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.3d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_1darray(<8 x i32> inreg %rsrc, i16 %s, i16 %t) {
+  ; GFX9-LABEL: name: load_mip_1darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1darray), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_1darray
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1darray), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i16(i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_2darray(<8 x i32> inreg %rsrc, i16 %s, i16 %t, i16 %u) {
+  ; GFX9-LABEL: name: load_mip_2darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_2darray
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2darray), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_cube(<8 x i32> inreg %rsrc, i16 %s, i16 %t, i16 %u) {
+  ; GFX9-LABEL: name: load_mip_cube
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_cube
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.cube), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps void @store_mip_1d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s) {
+  ; GFX9-LABEL: name: store_mip_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_1d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.1d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_mip_2d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t) {
+  ; GFX9-LABEL: name: store_mip_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.2d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_mip_3d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t, i16 %u) {
+  ; GFX9-LABEL: name: store_mip_3d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_3d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.3d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_mip_1darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t) {
+  ; GFX9-LABEL: name: store_mip_1darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_1darray
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.1darray.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_mip_2darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t, i16 %u) {
+  ; GFX9-LABEL: name: store_mip_2darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_2darray
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.2darray.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_mip_cube(<8 x i32> inreg %rsrc, <4 x float> %vdata, i16 %s, i16 %t, i16 %u) {
+  ; GFX9-LABEL: name: store_mip_cube
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_cube
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY17]](s32), [[DEF]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.cube.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %u, i16 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+declare <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i16(i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0
+declare void @llvm.amdgcn.image.store.mip.1d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.mip.2d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.mip.3d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.mip.cube.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.mip.1darray.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.mip.2darray.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+
+attributes #0 = { nounwind readonly }
+attributes #1 = { nounwind writeonly }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/image_ls_mipmap_zero.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/image_ls_mipmap_zero.ll
new file mode 100644
index 000000000000..dd4b193d1150
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/image_ls_mipmap_zero.ll
@@ -0,0 +1,617 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
+
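+; Editorial note on the checks that follow (an interpretation, not part of the
+; autogenerated assertions): every call below passes a constant 0 for the mip
+; level. In the legalized MIR, that zero is expected to remain an immediate 0
+; operand (or become $noreg once the remaining coordinates are packed into a
+; vector) rather than occupy a register, so later instruction selection can
+; recognize the mip-level-zero case.
+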
+define amdgpu_ps <4 x float> @load_mip_1d(<8 x i32> inreg %rsrc, i32 %s) {
+  ; GFX9-LABEL: name: load_mip_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1d), 15, [[COPY8]](s32), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_1d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1d), 15, [[COPY8]](s32), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32 15, i32 %s, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_2d(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
+  ; GFX9-LABEL: name: load_mip_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2d), 15, [[BUILD_VECTOR1]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2d), 15, [[BUILD_VECTOR1]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32(i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_3d(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %u) {
+  ; GFX9-LABEL: name: load_mip_3d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.3d), 15, [[BUILD_VECTOR1]](<3 x s32>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_3d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.3d), 15, [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_1darray(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
+  ; GFX9-LABEL: name: load_mip_1darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1darray), 15, [[BUILD_VECTOR1]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_1darray
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.1darray), 15, [[BUILD_VECTOR1]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i32(i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_2darray(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %u) {
+  ; GFX9-LABEL: name: load_mip_2darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2darray), 15, [[BUILD_VECTOR1]](<3 x s32>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_2darray
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.2darray), 15, [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_cube(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %u) {
+  ; GFX9-LABEL: name: load_mip_cube
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.cube), 15, [[BUILD_VECTOR1]](<3 x s32>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: load_mip_cube
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.mip.cube), 15, [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps void @store_mip_1d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s) {
+  ; GFX9-LABEL: name: store_mip_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[COPY12]](s32), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_1d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[COPY12]](s32), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.1d.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_mip_2d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t) {
+  ; GFX9-LABEL: name: store_mip_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR2]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR2]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.2d.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_mip_3d(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t, i32 %u) {
+  ; GFX9-LABEL: name: store_mip_3d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_3d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.3d.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_mip_1darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t) {
+  ; GFX9-LABEL: name: store_mip_1darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR2]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_1darray
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR2]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.1darray.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_mip_2darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t, i32 %u) {
+  ; GFX9-LABEL: name: store_mip_2darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_2darray
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.2darray.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_mip_cube(<8 x i32> inreg %rsrc, <4 x float> %vdata, i32 %s, i32 %t, i32 %u) {
+  ; GFX9-LABEL: name: store_mip_cube
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX9:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9:   S_ENDPGM 0
+  ; GFX10-LABEL: name: store_mip_cube
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr6
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   G_AMDGPU_INTRIN_IMAGE_STORE intrinsic(@llvm.amdgcn.image.store.mip.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), 0, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10:   S_ENDPGM 0
+main_body:
+  call void @llvm.amdgcn.image.store.mip.cube.v4f32.i32(<4 x float> %vdata, i32 15, i32 %s, i32 %t, i32 %u, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+declare <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32(i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32(i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i32(i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i32(i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i32(i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i32(i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #0
+declare void @llvm.amdgcn.image.store.mip.1d.v4f32.i32(<4 x float>, i32 immarg, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.mip.2d.v4f32.i32(<4 x float>, i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.mip.3d.v4f32.i32(<4 x float>, i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.mip.cube.v4f32.i32(<4 x float>, i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.mip.1darray.v4f32.i32(<4 x float>, i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.mip.2darray.v4f32.i32(<4 x float>, i32 immarg, i32, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #1
+
+attributes #0 = { nounwind readonly }
+attributes #1 = { nounwind writeonly }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.a16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.a16.ll
new file mode 100644
index 000000000000..c9af25cf9284
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.a16.ll
@@ -0,0 +1,837 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
+
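+; Editorial note on the checks that follow (an interpretation, not part of the
+; autogenerated assertions): each test samples through a .l intrinsic with a
+; constant-zero (or negative-zero) LOD. The legalized MIR shows the LOD elided
+; from the packed A16 address (an immediate 0 or $noreg in its place), which,
+; as the file name suggests, is what lets selection use the .lz opcodes.
+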
+define amdgpu_ps <4 x float> @sample_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %lod) {
+  ; GFX9-LABEL: name: sample_l_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.1d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: sample_l_1d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[DEF]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.1d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f16(i32 15, half %s, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @sample_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %lod) {
+  ; GFX9-LABEL: name: sample_l_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: sample_l_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f16(i32 15, half %s, half %t, half -0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @sample_c_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %lod) {
+  ; GFX9-LABEL: name: sample_c_l_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: sample_c_l_1d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
+  ; GFX10:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f16(i32 15, float %zcompare, half %s, half -2.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @sample_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t, half %lod) {
+  ; GFX9-LABEL: name: sample_c_l_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: sample_c_l_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @sample_l_o_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, half %s, half %lod) {
+  ; GFX9-LABEL: name: sample_l_o_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.o.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: sample_l_o_1d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
+  ; GFX10:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.o.1d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.l.o.1d.v4f32.f16(i32 15, i32 %offset, half %s, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @sample_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, half %s, half %t, half %lod) {
+  ; GFX9-LABEL: name: sample_l_o_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.o.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: sample_l_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.o.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.l.o.2d.v4f32.f16(i32 15, i32 %offset, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @sample_c_l_o_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, half %s, half %lod) {
+  ; GFX9-LABEL: name: sample_c_l_o_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[DEF]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.o.1d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: sample_c_l_o_1d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10:   [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[DEF]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.o.1d), 15, [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.1d.v4f32.f16(i32 15, i32 %offset, float %zcompare, half %s, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @sample_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, half %s, half %t, half %lod) {
+  ; GFX9-LABEL: name: sample_c_l_o_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.o.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: sample_c_l_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10:   [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.o.2d), 15, [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.2d.v4f32.f16(i32 15, i32 %offset, float %zcompare, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %lod) {
+  ; GFX9-LABEL: name: gather4_l_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_l_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f16(i32 15, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t, half %lod) {
+  ; GFX9-LABEL: name: gather4_c_l_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_c_l_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f16(i32 15, float %zcompare, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, half %s, half %t, half %lod) {
+  ; GFX9-LABEL: name: gather4_l_o_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.o.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_l_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32)
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.o.2d), 15, [[BITCAST]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f16(i32 15, i32 %offset, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, half %s, half %t, half %lod) {
+  ; GFX9-LABEL: name: gather4_c_l_o_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX9:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX9:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX9:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX9:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX9:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX9:   [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
+  ; GFX9:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX9:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX9:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX9:   [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+  ; GFX9:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.o.2d), 15, [[CONCAT_VECTORS]](<6 x s16>), $noreg, $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX9:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX9:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10-LABEL: name: gather4_c_l_o_2d
+  ; GFX10: bb.1.main_body:
+  ; GFX10:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GFX10:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GFX10:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GFX10:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GFX10:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10:   [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY12]](s32)
+  ; GFX10:   [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY13]](s32)
+  ; GFX10:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY14]](s32)
+  ; GFX10:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY15]](s32)
+  ; GFX10:   [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32)
+  ; GFX10:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.o.2d), 15, [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GFX10:   $vgpr0 = COPY [[UV]](s32)
+  ; GFX10:   $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10:   $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10:   $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f16(i32 15, i32 %offset, float %zcompare, half %s, half %t, half 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+declare <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f16(i32 immarg, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f16(i32 immarg, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f16(i32 immarg, float, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f16(i32 immarg, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.l.o.1d.v4f32.f16(i32 immarg, i32, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.l.o.2d.v4f32.f16(i32 immarg, i32, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.c.l.o.1d.v4f32.f16(i32 immarg, i32, float, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.c.l.o.2d.v4f32.f16(i32 immarg, i32, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f16(i32 immarg, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f16(i32 immarg, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f16(i32 immarg, i32, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f16(i32 immarg, i32, float, half, half, half, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+
+attributes #0 = { nounwind readonly }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.ll
new file mode 100644
index 000000000000..9f638dbb4d7a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.ll
@@ -0,0 +1,420 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -stop-after=legalizer -o - %s | FileCheck -check-prefix=GCN %s
+
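+; These tests cover the _L to _LZ folding during legalization: when the lod
+; argument is a constant that the hardware would clamp to zero (0.0, -0.0, or
+; a negative value), the lod register is dropped from the address operands and
+; an immediate 0 is kept in its slot of the G_AMDGPU_INTRIN_IMAGE_LOAD, so
+; that instruction selection can later pick the _LZ opcode.
+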
+define amdgpu_ps <4 x float> @sample_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %lod) {
+  ; GCN-LABEL: name: sample_l_1d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.1d), 15, [[COPY12]](s32), 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f32(i32 15, float %s, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
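+
+; The checks above still name the intrinsic @llvm.amdgcn.image.sample.l.1d,
+; but the constant 0.0 lod has been folded away: the only remaining address
+; operand is [[COPY12]] (%s), followed by an immediate 0 in the old lod slot.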
+
+define amdgpu_ps <4 x float> @sample_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t, float %lod) {
+  ; GCN-LABEL: name: sample_l_2d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.2d), 15, [[BUILD_VECTOR2]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32(i32 15, float %s, float %t, float -0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
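+
+; Negative zero folds exactly like positive zero: the -0.0 lod above is
+; removed, leaving the packed <2 x s32> address, a $noreg placeholder, and
+; the trailing immediate 0.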
+
+define amdgpu_ps <4 x float> @sample_c_l_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %lod) {
+  ; GCN-LABEL: name: sample_c_l_1d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.1d), 15, [[BUILD_VECTOR2]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f32(i32 15, float %zcompare, float %s, float -2.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
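+
+; A strictly negative constant lod (-2.0 here) is folded as well, since the
+; hardware clamps lod to zero, so a negative value samples the same level as
+; an explicit 0.0.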
+
+define amdgpu_ps <4 x float> @sample_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %t, float %lod) {
+  ; GCN-LABEL: name: sample_c_l_2d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.2d), 15, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f32(i32 15, float %zcompare, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @sample_l_o_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %lod) {
+  ; GCN-LABEL: name: sample_l_o_1d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.o.1d), 15, [[BUILD_VECTOR2]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.l.o.1d.v4f32.f32(i32 15, i32 %offset, float %s, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @sample_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t, float %lod) {
+  ; GCN-LABEL: name: sample_l_o_2d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.l.o.2d), 15, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @sample_c_l_o_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %lod) {
+  ; GCN-LABEL: name: sample_c_l_o_1d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.o.1d), 15, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.1d.v4f32.f32(i32 15, i32 %offset, float %zcompare, float %s, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @sample_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t, float %lod) {
+  ; GCN-LABEL: name: sample_c_l_o_2d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GCN:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.sample.c.l.o.2d), 15, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.sample.c.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %zcompare, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
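+
+; With all four surviving address registers (offset, zcompare, s, t) packed
+; into one <4 x s32>, the freed operand slots become $noreg and the dropped
+; lod remains visible as the trailing immediate 0.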
+
+define amdgpu_ps <4 x float> @gather4_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %s, float %t, float %lod) {
+  ; GCN-LABEL: name: gather4_l_2d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.2d), 15, [[BUILD_VECTOR2]](<2 x s32>), $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f32(i32 15, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, float %s, float %t, float %lod) {
+  ; GCN-LABEL: name: gather4_c_l_2d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.2d), 15, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f32(i32 15, float %zcompare, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %s, float %t, float %lod) {
+  ; GCN-LABEL: name: gather4_l_o_2d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.l.o.2d), 15, [[BUILD_VECTOR2]](<3 x s32>), $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @gather4_c_l_o_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, i32 %offset, float %zcompare, float %s, float %t, float %lod) {
+  ; GCN-LABEL: name: gather4_c_l_o_2d
+  ; GCN: bb.1.main_body:
+  ; GCN:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GCN:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GCN:   [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GCN:   [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GCN:   [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GCN:   [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GCN:   [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GCN:   [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GCN:   [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GCN:   [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
+  ; GCN:   [[COPY9:%[0-9]+]]:_(s32) = COPY $sgpr11
+  ; GCN:   [[COPY10:%[0-9]+]]:_(s32) = COPY $sgpr12
+  ; GCN:   [[COPY11:%[0-9]+]]:_(s32) = COPY $sgpr13
+  ; GCN:   [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN:   [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GCN:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GCN:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GCN:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GCN:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; GCN:   [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(<4 x s32>) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.gather4.c.l.o.2d), 15, [[BUILD_VECTOR2]](<4 x s32>), $noreg, $noreg, $noreg, 0, [[BUILD_VECTOR]](<8 x s32>), [[BUILD_VECTOR1]](<4 x s32>), 0, 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_INTRIN_IMAGE_LOAD]](<4 x s32>)
+  ; GCN:   $vgpr0 = COPY [[UV]](s32)
+  ; GCN:   $vgpr1 = COPY [[UV1]](s32)
+  ; GCN:   $vgpr2 = COPY [[UV2]](s32)
+  ; GCN:   $vgpr3 = COPY [[UV3]](s32)
+  ; GCN:   SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %v = call <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f32(i32 15, i32 %offset, float %zcompare, float %s, float %t, float 0.000000e+00, <8 x i32> %rsrc, <4 x i32> %samp, i1 false, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+declare <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.c.l.1d.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.c.l.2d.v4f32.f32(i32 immarg, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.l.o.1d.v4f32.f32(i32 immarg, i32, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.c.l.o.1d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.sample.c.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.l.2d.v4f32.f32(i32 immarg, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.l.2d.v4f32.f32(i32 immarg, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.image.gather4.c.l.o.2d.v4f32.f32(i32 immarg, i32, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #0
+
+attributes #0 = { nounwind readonly }

More information about the llvm-commits mailing list