[llvm] c0b1291 - AMDGPU/GlobalISel: Use more wide vector load/stores

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Sat Feb 1 07:49:44 PST 2020


Author: Matt Arsenault
Date: 2020-02-01T10:47:21-05:00
New Revision: c0b12916a7e81ed5018cd94606d1d99dc759704e

URL: https://github.com/llvm/llvm-project/commit/c0b12916a7e81ed5018cd94606d1d99dc759704e
DIFF: https://github.com/llvm/llvm-project/commit/c0b12916a7e81ed5018cd94606d1d99dc759704e.diff

LOG: AMDGPU/GlobalISel: Use more wide vector load/stores

This improves the type breakdown for some large vectors. For example,
we now get a <4 x s32> store and an s32 store instead of five s32
stores for a <5 x s32>.
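
As an illustrative sketch only, not part of the patch: the breakdown can
be modeled as repeatedly peeling off the widest whole-element piece that
fits the size limit. The 128-bit limit and the breakdown() helper below
are assumptions for the example, not the legalizer's real query machinery.

  // Standalone model of the element breakdown; LLT and the legalizer's
  // real LegalityQuery machinery are not used here.
  #include <cstdio>
  #include <vector>

  // Element count of each piece when splitting a vector of NumElts
  // elements of EltSize bits into pieces of at most MaxSize bits.
  static std::vector<unsigned> breakdown(unsigned NumElts, unsigned EltSize,
                                         unsigned MaxSize) {
    std::vector<unsigned> Pieces;
    unsigned MaxElts = MaxSize / EltSize; // widest whole-element piece
    while (NumElts != 0) {
      unsigned N = NumElts < MaxElts ? NumElts : MaxElts;
      Pieces.push_back(N);
      NumElts -= N;
    }
    return Pieces;
  }

  int main() {
    // <5 x s32> with a 128-bit limit: a <4 x s32> piece, then an s32 piece.
    for (unsigned N : breakdown(5, 32, 128)) {
      if (N == 1)
        std::printf("s32\n");
      else
        std::printf("<%u x s32>\n", N);
    }
  }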

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index a65b92eedd6e..fcf4dbc1273a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -788,6 +788,13 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
               // Split if it's too large for the address space.
               if (Query.MMODescrs[0].SizeInBits > MaxSize) {
                 unsigned NumElts = DstTy.getNumElements();
+                unsigned EltSize = EltTy.getSizeInBits();
+
+                if (MaxSize % EltSize == 0) {
+                  return std::make_pair(
+                    0, LLT::scalarOrVector(MaxSize / EltSize, EltTy));
+                }
+
                 unsigned NumPieces = Query.MMODescrs[0].SizeInBits / MaxSize;
 
                 // FIXME: Refine when odd breakdowns handled
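
To connect the new code path to the test updates that follow: when MaxSize
is a whole multiple of the element size, the mutation keeps operand 0 as
the widest whole-element type (LLT::scalarOrVector returns the scalar
itself when the count is 1), and the legalizer re-splits whatever remains
on a later pass. For the <3 x s64> flat loads below, assuming a 128-bit
MaxSize for this address space, that first piece is <2 x s64>, leaving one
s64, which is exactly the (load 16) plus (load 8) pair the updated checks
expect. A minimal sketch of that arithmetic, with the 128-bit limit as a
stated assumption:

  #include <cassert>
  #include <cstdio>

  int main() {
    // 128-bit MaxSize is an assumption for the example; the patch takes
    // it from the per-address-space query, not a constant.
    unsigned MaxSize = 128;
    unsigned EltSize = 64;            // s64 elements
    unsigned TotalSize = 3 * EltSize; // <3 x s64> = 192 bits
    assert(MaxSize % EltSize == 0);
    unsigned WideElts = MaxSize / EltSize;                   // <2 x s64>
    unsigned LeftoverElts = (TotalSize - MaxSize) / EltSize; // one s64
    std::printf("first: <%u x s64> (load %u), then %u x s64 (load %u)\n",
                WideElts, MaxSize / 8, LeftoverElts,
                (TotalSize - MaxSize) / 8);
  }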

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir
index 98001e2f72b6..c314bb30c118 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-flat.mir
@@ -7783,69 +7783,64 @@ body: |
 
     ; CI-LABEL: name: test_load_flat_v3s64_align32
     ; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
-    ; CI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8, align 32)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 32)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load 8, align 16)
-    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; CI: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; CI: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8, align 16)
+    ; CI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0
+    ; CI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128
+    ; CI: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; VI-LABEL: name: test_load_flat_v3s64_align32
     ; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
-    ; VI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8, align 32)
-    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 32)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8)
-    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; VI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load 8, align 16)
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; VI: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; VI: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8, align 16)
+    ; VI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0
+    ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128
+    ; VI: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; GFX9-LABEL: name: test_load_flat_v3s64_align32
     ; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
-    ; GFX9: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8, align 32)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 32)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load 8, align 16)
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8, align 16)
+    ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0
+    ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128
+    ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; CI-MESA-LABEL: name: test_load_flat_v3s64_align32
     ; CI-MESA: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
-    ; CI-MESA: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8, align 32)
-    ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CI-MESA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 32)
+    ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8)
-    ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load 8, align 16)
-    ; CI-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; CI-MESA: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; CI-MESA: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; CI-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8, align 16)
+    ; CI-MESA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; CI-MESA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0
+    ; CI-MESA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128
+    ; CI-MESA: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; CI-MESA: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; CI-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; GFX9-MESA-LABEL: name: test_load_flat_v3s64_align32
     ; GFX9-MESA: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
-    ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8, align 32)
-    ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 32)
+    ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8)
-    ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load 8, align 16)
-    ; GFX9-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; GFX9-MESA: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; GFX9-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8, align 16)
+    ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; GFX9-MESA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0
+    ; GFX9-MESA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128
+    ; GFX9-MESA: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; GFX9-MESA: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; GFX9-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     %0:_(p0) = COPY $vgpr0_vgpr1
     %1:_(<3 x s64>) = G_LOAD %0 :: (load 24, align 32, addrspace 0)
     %2:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -7861,69 +7856,64 @@ body: |
 
     ; CI-LABEL: name: test_load_flat_v3s64_align8
     ; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
-    ; CI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 8)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
     ; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load 8)
-    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; CI: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; CI: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; CI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0
+    ; CI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128
+    ; CI: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; VI-LABEL: name: test_load_flat_v3s64_align8
     ; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
-    ; VI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8)
-    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 8)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
     ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8)
-    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; VI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load 8)
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; VI: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; VI: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; VI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0
+    ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128
+    ; VI: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; GFX9-LABEL: name: test_load_flat_v3s64_align8
     ; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
-    ; GFX9: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 8)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
     ; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load 8)
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0
+    ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128
+    ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; CI-MESA-LABEL: name: test_load_flat_v3s64_align8
     ; CI-MESA: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
-    ; CI-MESA: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8)
-    ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CI-MESA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 8)
+    ; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
     ; CI-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8)
-    ; CI-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI-MESA: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI-MESA: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load 8)
-    ; CI-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; CI-MESA: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; CI-MESA: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; CI-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; CI-MESA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; CI-MESA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0
+    ; CI-MESA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128
+    ; CI-MESA: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; CI-MESA: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; CI-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; GFX9-MESA-LABEL: name: test_load_flat_v3s64_align8
     ; GFX9-MESA: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
-    ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8)
-    ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16, align 8)
+    ; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
     ; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load 8)
-    ; GFX9-MESA: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9-MESA: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9-MESA: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load 8)
-    ; GFX9-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; GFX9-MESA: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; GFX9-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; GFX9-MESA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0
+    ; GFX9-MESA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128
+    ; GFX9-MESA: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; GFX9-MESA: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; GFX9-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     %0:_(p0) = COPY $vgpr0_vgpr1
     %1:_(<3 x s64>) = G_LOAD %0 :: (load 24, align 8, addrspace 0)
     %2:_(<4 x s64>) = G_IMPLICIT_DEF
@@ -8063,6 +8053,7 @@ body: |
     ; CI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[ZEXT7]], [[C10]](s32)
     ; CI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[ZEXT6]], [[SHL11]]
     ; CI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32)
+    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
     ; CI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C12]](s64)
     ; CI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1)
@@ -8121,10 +8112,12 @@ body: |
     ; CI: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXT11]], [[C10]](s32)
     ; CI: [[OR17:%[0-9]+]]:_(s32) = G_OR [[ZEXT10]], [[SHL17]]
     ; CI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32)
-    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64)
-    ; CI: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; CI: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; CI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<2 x s64>), 0
+    ; CI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[MV2]](s64), 128
+    ; CI: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; VI-LABEL: name: test_load_flat_v3s64_align1
     ; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
     ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1)
@@ -8235,6 +8228,7 @@ body: |
     ; VI: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[ZEXT7]], [[C9]](s32)
     ; VI: [[OR11:%[0-9]+]]:_(s32) = G_OR [[ZEXT6]], [[SHL11]]
     ; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32)
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
     ; VI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64)
     ; VI: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1)
@@ -8285,10 +8279,12 @@ body: |
     ; VI: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXT11]], [[C9]](s32)
     ; VI: [[OR17:%[0-9]+]]:_(s32) = G_OR [[ZEXT10]], [[SHL17]]
     ; VI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32)
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64)
-    ; VI: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; VI: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; VI: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<2 x s64>), 0
+    ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[MV2]](s64), 128
+    ; VI: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; GFX9-LABEL: name: test_load_flat_v3s64_align1
     ; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
     ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1)
@@ -8399,6 +8395,7 @@ body: |
     ; GFX9: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[ZEXT7]], [[C9]](s32)
     ; GFX9: [[OR11:%[0-9]+]]:_(s32) = G_OR [[ZEXT6]], [[SHL11]]
     ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32)
+    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
     ; GFX9: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64)
     ; GFX9: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1)
@@ -8449,10 +8446,12 @@ body: |
     ; GFX9: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXT11]], [[C9]](s32)
     ; GFX9: [[OR17:%[0-9]+]]:_(s32) = G_OR [[ZEXT10]], [[SHL17]]
     ; GFX9: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32)
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64)
-    ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<2 x s64>), 0
+    ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[MV2]](s64), 128
+    ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; CI-MESA-LABEL: name: test_load_flat_v3s64_align1
     ; CI-MESA: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
     ; CI-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1)
@@ -8579,6 +8578,7 @@ body: |
     ; CI-MESA: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[ZEXT7]], [[C10]](s32)
     ; CI-MESA: [[OR11:%[0-9]+]]:_(s32) = G_OR [[ZEXT6]], [[SHL11]]
     ; CI-MESA: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32)
+    ; CI-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
     ; CI-MESA: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI-MESA: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C12]](s64)
     ; CI-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1)
@@ -8637,10 +8637,12 @@ body: |
     ; CI-MESA: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXT11]], [[C10]](s32)
     ; CI-MESA: [[OR17:%[0-9]+]]:_(s32) = G_OR [[ZEXT10]], [[SHL17]]
     ; CI-MESA: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32)
-    ; CI-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64)
-    ; CI-MESA: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; CI-MESA: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; CI-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; CI-MESA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; CI-MESA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<2 x s64>), 0
+    ; CI-MESA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[MV2]](s64), 128
+    ; CI-MESA: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; CI-MESA: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; CI-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; GFX9-MESA-LABEL: name: test_load_flat_v3s64_align1
     ; GFX9-MESA: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
     ; GFX9-MESA: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1)
@@ -8751,6 +8753,7 @@ body: |
     ; GFX9-MESA: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[ZEXT7]], [[C9]](s32)
     ; GFX9-MESA: [[OR11:%[0-9]+]]:_(s32) = G_OR [[ZEXT6]], [[SHL11]]
     ; GFX9-MESA: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR10]](s32), [[OR11]](s32)
+    ; GFX9-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
     ; GFX9-MESA: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9-MESA: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64)
     ; GFX9-MESA: [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD15]](p0) :: (load 1)
@@ -8801,10 +8804,12 @@ body: |
     ; GFX9-MESA: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[ZEXT11]], [[C9]](s32)
     ; GFX9-MESA: [[OR17:%[0-9]+]]:_(s32) = G_OR [[ZEXT10]], [[SHL17]]
     ; GFX9-MESA: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR16]](s32), [[OR17]](s32)
-    ; GFX9-MESA: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64)
-    ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; GFX9-MESA: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; GFX9-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; GFX9-MESA: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; GFX9-MESA: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<2 x s64>), 0
+    ; GFX9-MESA: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[MV2]](s64), 128
+    ; GFX9-MESA: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; GFX9-MESA: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; GFX9-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     %0:_(p0) = COPY $vgpr0_vgpr1
     %1:_(<3 x s64>) = G_LOAD %0 :: (load 24, align 1, addrspace 0)
     %2:_(<4 x s64>) = G_IMPLICIT_DEF

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir
index 69e0dd47d378..9f8fd5bbaaaa 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-local.mir
@@ -7624,6 +7624,7 @@ body: |
     ; SI: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[OR4]](s16)
     ; SI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[ZEXT3]], [[C6]](s32)
     ; SI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL5]]
+    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32)
     ; SI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1, addrspace 3)
     ; SI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
@@ -7652,8 +7653,10 @@ body: |
     ; SI: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[OR7]](s16)
     ; SI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[ZEXT5]], [[C6]](s32)
     ; SI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[ZEXT4]], [[SHL8]]
-    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
-    ; SI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+    ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
+    ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<2 x s32>), 0
+    ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[OR8]](s32), 64
+    ; SI: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](<3 x s32>)
     ; CI-LABEL: name: test_load_local_v3s32_align16
     ; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3)
@@ -7719,6 +7722,7 @@ body: |
     ; CI: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[OR4]](s16)
     ; CI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[ZEXT3]], [[C6]](s32)
     ; CI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL5]]
+    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32)
     ; CI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1, addrspace 3)
     ; CI: [[PTR_ADD8:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
@@ -7747,8 +7751,10 @@ body: |
     ; CI: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[OR7]](s16)
     ; CI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[ZEXT5]], [[C6]](s32)
     ; CI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[ZEXT4]], [[SHL8]]
-    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
-    ; CI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+    ; CI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<2 x s32>), 0
+    ; CI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[OR8]](s32), 64
+    ; CI: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](<3 x s32>)
     ; CI-DS128-LABEL: name: test_load_local_v3s32_align16
     ; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3)
@@ -7900,6 +7906,7 @@ body: |
     ; VI: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[OR4]](s16)
     ; VI: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[ZEXT3]], [[C5]](s32)
     ; VI: [[OR5:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL5]]
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32)
     ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; VI: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32)
     ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1, addrspace 3)
@@ -7925,8 +7932,10 @@ body: |
     ; VI: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[OR7]](s16)
     ; VI: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[ZEXT5]], [[C5]](s32)
     ; VI: [[OR8:%[0-9]+]]:_(s32) = G_OR [[ZEXT4]], [[SHL8]]
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
-    ; VI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+    ; VI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<2 x s32>), 0
+    ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[OR8]](s32), 64
+    ; VI: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](<3 x s32>)
     ; GFX9-LABEL: name: test_load_local_v3s32_align16
     ; GFX9: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 1, addrspace 3)
@@ -7983,6 +7992,7 @@ body: |
     ; GFX9: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[OR4]](s16)
     ; GFX9: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[ZEXT3]], [[C5]](s32)
     ; GFX9: [[OR5:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL5]]
+    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32)
     ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C7]](s32)
     ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p3) :: (load 1, addrspace 3)
@@ -8008,8 +8018,10 @@ body: |
     ; GFX9: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[OR7]](s16)
     ; GFX9: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[ZEXT5]], [[C5]](s32)
     ; GFX9: [[OR8:%[0-9]+]]:_(s32) = G_OR [[ZEXT4]], [[SHL8]]
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
-    ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+    ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<2 x s32>), 0
+    ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[OR8]](s32), 64
+    ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](<3 x s32>)
     %0:_(p3) = COPY $vgpr0
     %1:_(<3 x s32>) = G_LOAD %0 :: (load 12, align 1, addrspace 3)
     $vgpr0_vgpr1_vgpr2 = COPY %1
@@ -8023,52 +8035,48 @@ body: |
 
     ; SI-LABEL: name: test_load_local_v3s32_align4
     ; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
-    ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3)
-    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3)
+    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4, addrspace 3)
-    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
-    ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4, addrspace 3)
-    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
-    ; SI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+    ; SI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
+    ; SI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0
+    ; SI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
+    ; SI: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](<3 x s32>)
     ; CI-LABEL: name: test_load_local_v3s32_align4
     ; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
-    ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3)
-    ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3)
+    ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4, addrspace 3)
-    ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
-    ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4, addrspace 3)
-    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
-    ; CI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+    ; CI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0
+    ; CI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
+    ; CI: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](<3 x s32>)
     ; CI-DS128-LABEL: name: test_load_local_v3s32_align4
     ; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; CI-DS128: [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[COPY]](p3) :: (load 12, align 4, addrspace 3)
     ; CI-DS128: $vgpr0_vgpr1_vgpr2 = COPY [[LOAD]](<3 x s32>)
     ; VI-LABEL: name: test_load_local_v3s32_align4
     ; VI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
-    ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3)
-    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3)
+    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4, addrspace 3)
-    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
-    ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4, addrspace 3)
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
-    ; VI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+    ; VI: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0
+    ; VI: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
+    ; VI: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](<3 x s32>)
     ; GFX9-LABEL: name: test_load_local_v3s32_align4
     ; GFX9: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
-    ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, addrspace 3)
-    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 4, addrspace 3)
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load 4, addrspace 3)
-    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-    ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
-    ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load 4, addrspace 3)
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32)
-    ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+    ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0
+    ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
+    ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](<3 x s32>)
     %0:_(p3) = COPY $vgpr0
     %1:_(<3 x s32>) = G_LOAD %0 :: (load 12, align 4, addrspace 3)
     $vgpr0_vgpr1_vgpr2 = COPY %1
@@ -9172,19 +9180,20 @@ body: |
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; SI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load 8, addrspace 3)
-    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>)
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; SI: [[LOAD4:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD3]](p3) :: (load 8, align 32, addrspace 3)
-    ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; SI: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD4]](p3) :: (load 8, addrspace 3)
-    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32)
+    ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
+    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; SI: [[LOAD6:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD5]](p3) :: (load 8, align 16, addrspace 3)
-    ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+    ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
+    ; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; SI: [[LOAD7:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD6]](p3) :: (load 8, addrspace 3)
-    ; SI: [[CONCAT_VECTORS1:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
-    ; SI: [[CONCAT_VECTORS2:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[CONCAT_VECTORS]](<8 x s32>), [[CONCAT_VECTORS1]](<8 x s32>)
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS2]](<16 x s32>)
+    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>), [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
     ; CI-LABEL: name: test_load_local_v16s32_align32
     ; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 32, addrspace 3)
@@ -9197,19 +9206,20 @@ body: |
     ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; CI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load 8, addrspace 3)
-    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>)
     ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; CI: [[LOAD4:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD3]](p3) :: (load 8, align 32, addrspace 3)
-    ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; CI: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD4]](p3) :: (load 8, addrspace 3)
-    ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32)
+    ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
+    ; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; CI: [[LOAD6:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD5]](p3) :: (load 8, align 16, addrspace 3)
-    ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+    ; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
+    ; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; CI: [[LOAD7:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD6]](p3) :: (load 8, addrspace 3)
-    ; CI: [[CONCAT_VECTORS1:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
-    ; CI: [[CONCAT_VECTORS2:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[CONCAT_VECTORS]](<8 x s32>), [[CONCAT_VECTORS1]](<8 x s32>)
-    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS2]](<16 x s32>)
+    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>), [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
+    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
     ; CI-DS128-LABEL: name: test_load_local_v16s32_align32
     ; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; CI-DS128: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load 4, align 32, addrspace 3)
@@ -9272,19 +9282,20 @@ body: |
     ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; VI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load 8, addrspace 3)
-    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>)
     ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; VI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; VI: [[LOAD4:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD3]](p3) :: (load 8, align 32, addrspace 3)
-    ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; VI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; VI: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD4]](p3) :: (load 8, addrspace 3)
-    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32)
+    ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
+    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; VI: [[LOAD6:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD5]](p3) :: (load 8, align 16, addrspace 3)
-    ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+    ; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
+    ; VI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; VI: [[LOAD7:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD6]](p3) :: (load 8, addrspace 3)
-    ; VI: [[CONCAT_VECTORS1:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
-    ; VI: [[CONCAT_VECTORS2:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[CONCAT_VECTORS]](<8 x s32>), [[CONCAT_VECTORS1]](<8 x s32>)
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS2]](<16 x s32>)
+    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>), [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
     ; GFX9-LABEL: name: test_load_local_v16s32_align32
     ; GFX9: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load 8, align 32, addrspace 3)
@@ -9297,19 +9308,20 @@ body: |
     ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; GFX9: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load 8, addrspace 3)
-    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>)
     ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
     ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; GFX9: [[LOAD4:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD3]](p3) :: (load 8, align 32, addrspace 3)
-    ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; GFX9: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD4]](p3) :: (load 8, addrspace 3)
-    ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32)
+    ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
+    ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; GFX9: [[LOAD6:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD5]](p3) :: (load 8, align 16, addrspace 3)
-    ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+    ; GFX9: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
+    ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; GFX9: [[LOAD7:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD6]](p3) :: (load 8, addrspace 3)
-    ; GFX9: [[CONCAT_VECTORS1:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
-    ; GFX9: [[CONCAT_VECTORS2:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[CONCAT_VECTORS]](<8 x s32>), [[CONCAT_VECTORS1]](<8 x s32>)
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS2]](<16 x s32>)
+    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>), [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
     %0:_(p3) = COPY $vgpr0
     %1:_(<16 x s32>) = G_LOAD %0 :: (load 16, align 32, addrspace 3)
     $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY %1
@@ -10015,17 +10027,16 @@ body: |
     ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
     ; CI-DS128-LABEL: name: test_load_local_v3s64_align32
     ; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
-    ; CI-DS128: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load 8, align 32, addrspace 3)
-    ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load 16, align 32, addrspace 3)
+    ; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
-    ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8, addrspace 3)
-    ; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
-    ; CI-DS128: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p3) :: (load 8, align 16, addrspace 3)
-    ; CI-DS128: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; CI-DS128: [[DEF:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
-    ; CI-DS128: [[INSERT:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF]], [[BUILD_VECTOR]](<3 x s64>), 0
-    ; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
+    ; CI-DS128: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load 8, align 16, addrspace 3)
+    ; CI-DS128: [[DEF:%[0-9]+]]:_(<3 x s64>) = G_IMPLICIT_DEF
+    ; CI-DS128: [[INSERT:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[DEF]], [[LOAD]](<2 x s64>), 0
+    ; CI-DS128: [[INSERT1:%[0-9]+]]:_(<3 x s64>) = G_INSERT [[INSERT]], [[LOAD1]](s64), 128
+    ; CI-DS128: [[DEF1:%[0-9]+]]:_(<4 x s64>) = G_IMPLICIT_DEF
+    ; CI-DS128: [[INSERT2:%[0-9]+]]:_(<4 x s64>) = G_INSERT [[DEF1]], [[INSERT1]](<3 x s64>), 0
+    ; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](<4 x s64>)
     ; VI-LABEL: name: test_load_local_v3s64_align32
     ; VI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; VI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p3) :: (load 8, align 32, addrspace 3)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir
index 4a9356f47b65..f39df81aa865 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-private.mir
@@ -5142,84 +5142,84 @@ body: |
 
     ; SI-LABEL: name: test_load_private_v3s16_align8
     ; SI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
-    ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, align 8, addrspace 5)
-    ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
-    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+    ; SI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5)
+    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
-    ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, addrspace 5)
-    ; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
-    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
-    ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2, align 4, addrspace 5)
-    ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
-    ; SI: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
-    ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
-    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; SI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, align 4, addrspace 5)
+    ; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
+    ; SI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; SI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; SI: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; SI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; SI: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[LOAD]](<2 x s16>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; SI: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; SI: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; SI: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[TRUNC]](s16), 32
+    ; SI: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; SI: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; SI: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; SI: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     ; CI-LABEL: name: test_load_private_v3s16_align8
     ; CI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
-    ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, align 8, addrspace 5)
-    ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
-    ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+    ; CI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5)
+    ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
-    ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, addrspace 5)
-    ; CI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
-    ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
-    ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2, align 4, addrspace 5)
-    ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
-    ; CI: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
-    ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
-    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; CI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, align 4, addrspace 5)
+    ; CI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
+    ; CI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; CI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; CI: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; CI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; CI: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[LOAD]](<2 x s16>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; CI: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; CI: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[TRUNC]](s16), 32
+    ; CI: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; CI: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; CI: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     ; VI-LABEL: name: test_load_private_v3s16_align8
     ; VI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
-    ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, align 8, addrspace 5)
-    ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
-    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+    ; VI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5)
+    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
-    ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, addrspace 5)
-    ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
-    ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
-    ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2, align 4, addrspace 5)
-    ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
-    ; VI: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
-    ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
-    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; VI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, align 4, addrspace 5)
+    ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
+    ; VI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; VI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; VI: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; VI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; VI: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[LOAD]](<2 x s16>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; VI: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; VI: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[TRUNC]](s16), 32
+    ; VI: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; VI: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; VI: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     ; GFX9-LABEL: name: test_load_private_v3s16_align8
     ; GFX9: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
-    ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, align 8, addrspace 5)
-    ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD]](s32)
-    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+    ; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load 4, align 8, addrspace 5)
+    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
-    ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, addrspace 5)
-    ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
-    ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
-    ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2, align 4, addrspace 5)
-    ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
-    ; GFX9: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
-    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
-    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, align 4, addrspace 5)
+    ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
+    ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; GFX9: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; GFX9: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[LOAD]](<2 x s16>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; GFX9: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; GFX9: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[TRUNC]](s16), 32
+    ; GFX9: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; GFX9: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; GFX9: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     %0:_(p5) = COPY $vgpr0
     %1:_(<3 x s16>) = G_LOAD %0 :: (load 6, align 8, addrspace 5)
     %2:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -5241,18 +5241,24 @@ body: |
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, addrspace 5)
     ; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
+    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2, addrspace 5)
     ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
-    ; SI: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
-    ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
-    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; SI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; SI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; SI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; SI: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; SI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; SI: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[BUILD_VECTOR]](<2 x s16>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; SI: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; SI: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; SI: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[TRUNC2]](s16), 32
+    ; SI: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; SI: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; SI: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; SI: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     ; CI-LABEL: name: test_load_private_v3s16_align2
     ; CI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5)
@@ -5261,18 +5267,24 @@ body: |
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, addrspace 5)
     ; CI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
+    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
     ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2, addrspace 5)
     ; CI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
-    ; CI: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
-    ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
-    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; CI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; CI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; CI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; CI: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; CI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; CI: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[BUILD_VECTOR]](<2 x s16>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; CI: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; CI: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[TRUNC2]](s16), 32
+    ; CI: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; CI: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; CI: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     ; VI-LABEL: name: test_load_private_v3s16_align2
     ; VI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5)
@@ -5281,18 +5293,24 @@ body: |
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, addrspace 5)
     ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
     ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2, addrspace 5)
     ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
-    ; VI: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
-    ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
-    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; VI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; VI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; VI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; VI: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; VI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; VI: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[BUILD_VECTOR]](<2 x s16>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; VI: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; VI: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[TRUNC2]](s16), 32
+    ; VI: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; VI: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; VI: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     ; GFX9-LABEL: name: test_load_private_v3s16_align2
     ; GFX9: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 2, addrspace 5)
@@ -5301,18 +5319,24 @@ body: |
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 2, addrspace 5)
     ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD1]](s32)
+    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
     ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 2, addrspace 5)
     ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LOAD2]](s32)
-    ; GFX9: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
-    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
-    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; GFX9: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; GFX9: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[BUILD_VECTOR]](<2 x s16>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; GFX9: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; GFX9: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[TRUNC2]](s16), 32
+    ; GFX9: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; GFX9: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; GFX9: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     %0:_(p5) = COPY $vgpr0
     %1:_(<3 x s16>) = G_LOAD %0 :: (load 6, align 2, addrspace 5)
     %2:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -5355,6 +5379,7 @@ body: |
     ; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[COPY2]](s32)
     ; SI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[SHL1]](s32)
     ; SI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[TRUNC3]]
+    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR]](s16), [[OR1]](s16)
     ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1, addrspace 5)
@@ -5368,14 +5393,19 @@ body: |
     ; SI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[COPY4]](s32)
     ; SI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL2]](s32)
     ; SI: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]]
-    ; SI: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR]](s16), [[OR1]](s16)
-    ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR2]](s16), [[DEF]](s16)
-    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; SI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; SI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; SI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; SI: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; SI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; SI: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[BUILD_VECTOR]](<2 x s16>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; SI: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; SI: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; SI: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[OR2]](s16), 32
+    ; SI: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; SI: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; SI: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; SI: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     ; CI-LABEL: name: test_load_private_v3s16_align1
     ; CI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5)
@@ -5405,6 +5435,7 @@ body: |
     ; CI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[COPY2]](s32)
     ; CI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[SHL1]](s32)
     ; CI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[TRUNC3]]
+    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR]](s16), [[OR1]](s16)
     ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1, addrspace 5)
@@ -5418,14 +5449,19 @@ body: |
     ; CI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[COPY4]](s32)
     ; CI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[SHL2]](s32)
     ; CI: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[TRUNC5]]
-    ; CI: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR]](s16), [[OR1]](s16)
-    ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR2]](s16), [[DEF]](s16)
-    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; CI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; CI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; CI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; CI: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; CI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; CI: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[BUILD_VECTOR]](<2 x s16>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; CI: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; CI: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[OR2]](s16), 32
+    ; CI: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; CI: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CI: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; CI: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     ; VI-LABEL: name: test_load_private_v3s16_align1
     ; VI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5)
@@ -5451,6 +5487,7 @@ body: |
     ; VI: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C1]]
     ; VI: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C2]](s16)
     ; VI: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL1]]
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR]](s16), [[OR1]](s16)
     ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1, addrspace 5)
@@ -5462,14 +5499,19 @@ body: |
     ; VI: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC5]], [[C1]]
     ; VI: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16)
     ; VI: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL2]]
-    ; VI: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR]](s16), [[OR1]](s16)
-    ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR2]](s16), [[DEF]](s16)
-    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; VI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; VI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; VI: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; VI: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; VI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; VI: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[BUILD_VECTOR]](<2 x s16>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; VI: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; VI: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[OR2]](s16), 32
+    ; VI: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; VI: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; VI: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; VI: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     ; GFX9-LABEL: name: test_load_private_v3s16_align1
     ; GFX9: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 1, addrspace 5)
@@ -5495,6 +5537,7 @@ body: |
     ; GFX9: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C1]]
     ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C2]](s16)
     ; GFX9: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL1]]
+    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR]](s16), [[OR1]](s16)
     ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 1, addrspace 5)
@@ -5506,14 +5549,19 @@ body: |
     ; GFX9: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC5]], [[C1]]
     ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[AND5]], [[C2]](s16)
     ; GFX9: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[SHL2]]
-    ; GFX9: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR]](s16), [[OR1]](s16)
-    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[OR2]](s16), [[DEF]](s16)
-    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
-    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+    ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
     ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT]](<3 x s16>), 0
-    ; GFX9: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
+    ; GFX9: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT]], [[BUILD_VECTOR]](<2 x s16>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT1]](<4 x s16>), 0
+    ; GFX9: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT2:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT1]](<3 x s16>), 0
+    ; GFX9: [[INSERT3:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[INSERT2]], [[OR2]](s16), 32
+    ; GFX9: [[EXTRACT2:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[INSERT3]](<4 x s16>), 0
+    ; GFX9: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[INSERT4:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF3]], [[EXTRACT2]](<3 x s16>), 0
+    ; GFX9: $vgpr0_vgpr1 = COPY [[INSERT4]](<4 x s16>)
     %0:_(p5) = COPY $vgpr0
     %1:_(<3 x s16>) = G_LOAD %0 :: (load 6, align 1, addrspace 5)
     %2:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -7989,108 +8037,104 @@ body: |
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4, addrspace 5)
-    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4, align 8, addrspace 5)
-    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+    ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD2]](s32), [[LOAD3]](s32)
-    ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+    ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4, align 16, addrspace 5)
-    ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+    ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4, addrspace 5)
-    ; SI: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD4]](s32), [[LOAD5]](s32)
-    ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+    ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4, align 8, addrspace 5)
-    ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+    ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
+    ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4, addrspace 5)
-    ; SI: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD6]](s32), [[LOAD7]](s32)
-    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s32>), [[BUILD_VECTOR1]](<2 x s32>), [[BUILD_VECTOR2]](<2 x s32>), [[BUILD_VECTOR3]](<2 x s32>)
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
+    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
     ; CI-LABEL: name: test_load_private_v8s32_align32
     ; CI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5)
     ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4, addrspace 5)
-    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
     ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4, align 8, addrspace 5)
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+    ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD2]](s32), [[LOAD3]](s32)
-    ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+    ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4, align 16, addrspace 5)
-    ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+    ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4, addrspace 5)
-    ; CI: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD4]](s32), [[LOAD5]](s32)
-    ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+    ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4, align 8, addrspace 5)
-    ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+    ; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
+    ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4, addrspace 5)
-    ; CI: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD6]](s32), [[LOAD7]](s32)
-    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s32>), [[BUILD_VECTOR1]](<2 x s32>), [[BUILD_VECTOR2]](<2 x s32>), [[BUILD_VECTOR3]](<2 x s32>)
-    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
+    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
+    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
     ; VI-LABEL: name: test_load_private_v8s32_align32
     ; VI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5)
     ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4, addrspace 5)
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
     ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4, align 8, addrspace 5)
-    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+    ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD2]](s32), [[LOAD3]](s32)
-    ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+    ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4, align 16, addrspace 5)
-    ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+    ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4, addrspace 5)
-    ; VI: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD4]](s32), [[LOAD5]](s32)
-    ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+    ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4, align 8, addrspace 5)
-    ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+    ; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
+    ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4, addrspace 5)
-    ; VI: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD6]](s32), [[LOAD7]](s32)
-    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s32>), [[BUILD_VECTOR1]](<2 x s32>), [[BUILD_VECTOR2]](<2 x s32>), [[BUILD_VECTOR3]](<2 x s32>)
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
     ; GFX9-LABEL: name: test_load_private_v8s32_align32
     ; GFX9: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5)
     ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
     ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4, align 8, addrspace 5)
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+    ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD2]](s32), [[LOAD3]](s32)
-    ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
+    ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4, align 16, addrspace 5)
-    ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+    ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD4]](s32), [[LOAD5]](s32)
-    ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-    ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
+    ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4, align 8, addrspace 5)
-    ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
+    ; GFX9: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
+    ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD6]](s32), [[LOAD7]](s32)
-    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s32>), [[BUILD_VECTOR1]](<2 x s32>), [[BUILD_VECTOR2]](<2 x s32>), [[BUILD_VECTOR3]](<2 x s32>)
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
+    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
     %0:_(p5) = COPY $vgpr0
     %1:_(<8 x s32>) = G_LOAD %0 :: (load 16, align 32, addrspace 5)
     $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %1
@@ -8114,39 +8158,44 @@ body: |
     ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
     ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
     ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; SI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; SI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4, align 16, addrspace 5)
-    ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+    ; SI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; SI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4, addrspace 5)
-    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32)
+    ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; SI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4, align 8, addrspace 5)
-    ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+    ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
+    ; SI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; SI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4, addrspace 5)
-    ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
-    ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
-    ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+    ; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; SI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32)
     ; SI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 4, align 32, addrspace 5)
-    ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+    ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
+    ; SI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32)
     ; SI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 4, addrspace 5)
-    ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32)
+    ; SI: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; SI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C9]](s32)
     ; SI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 4, align 8, addrspace 5)
-    ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+    ; SI: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
+    ; SI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C10]](s32)
     ; SI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 4, addrspace 5)
-    ; SI: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32)
-    ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
-    ; SI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+    ; SI: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
+    ; SI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C11]](s32)
     ; SI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 4, align 16, addrspace 5)
-    ; SI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+    ; SI: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
+    ; SI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C12]](s32)
     ; SI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 4, addrspace 5)
-    ; SI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+    ; SI: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
+    ; SI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C13]](s32)
     ; SI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 4, align 8, addrspace 5)
-    ; SI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+    ; SI: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
+    ; SI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C14]](s32)
     ; SI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 4, addrspace 5)
-    ; SI: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
-    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<4 x s32>), [[BUILD_VECTOR1]](<4 x s32>), [[BUILD_VECTOR2]](<4 x s32>), [[BUILD_VECTOR3]](<4 x s32>)
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
+    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
     ; CI-LABEL: name: test_load_private_v16s32_align32
     ; CI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5)
@@ -8159,39 +8208,44 @@ body: |
     ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
     ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
     ; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; CI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4, align 16, addrspace 5)
-    ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+    ; CI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; CI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4, addrspace 5)
-    ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32)
+    ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; CI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; CI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4, align 8, addrspace 5)
-    ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+    ; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
+    ; CI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; CI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4, addrspace 5)
-    ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
-    ; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
-    ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+    ; CI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; CI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32)
     ; CI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 4, align 32, addrspace 5)
-    ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+    ; CI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
+    ; CI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32)
     ; CI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 4, addrspace 5)
-    ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32)
+    ; CI: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; CI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C9]](s32)
     ; CI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 4, align 8, addrspace 5)
-    ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+    ; CI: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
+    ; CI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C10]](s32)
     ; CI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 4, addrspace 5)
-    ; CI: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32)
-    ; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
-    ; CI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+    ; CI: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
+    ; CI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C11]](s32)
     ; CI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 4, align 16, addrspace 5)
-    ; CI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+    ; CI: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
+    ; CI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C12]](s32)
     ; CI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 4, addrspace 5)
-    ; CI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+    ; CI: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
+    ; CI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C13]](s32)
     ; CI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 4, align 8, addrspace 5)
-    ; CI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+    ; CI: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
+    ; CI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C14]](s32)
     ; CI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 4, addrspace 5)
-    ; CI: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
-    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<4 x s32>), [[BUILD_VECTOR1]](<4 x s32>), [[BUILD_VECTOR2]](<4 x s32>), [[BUILD_VECTOR3]](<4 x s32>)
-    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
+    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
+    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
     ; VI-LABEL: name: test_load_private_v16s32_align32
     ; VI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5)
@@ -8204,39 +8258,44 @@ body: |
     ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
     ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
     ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; VI: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; VI: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4, align 16, addrspace 5)
-    ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+    ; VI: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; VI: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4, addrspace 5)
-    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32)
+    ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; VI: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4, align 8, addrspace 5)
-    ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+    ; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
+    ; VI: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; VI: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4, addrspace 5)
-    ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
-    ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
-    ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+    ; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; VI: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32)
     ; VI: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 4, align 32, addrspace 5)
-    ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+    ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
+    ; VI: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32)
     ; VI: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 4, addrspace 5)
-    ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32)
+    ; VI: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; VI: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C9]](s32)
     ; VI: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 4, align 8, addrspace 5)
-    ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+    ; VI: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
+    ; VI: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C10]](s32)
     ; VI: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 4, addrspace 5)
-    ; VI: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32)
-    ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
-    ; VI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+    ; VI: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
+    ; VI: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C11]](s32)
     ; VI: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 4, align 16, addrspace 5)
-    ; VI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+    ; VI: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
+    ; VI: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C12]](s32)
     ; VI: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 4, addrspace 5)
-    ; VI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+    ; VI: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
+    ; VI: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C13]](s32)
     ; VI: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 4, align 8, addrspace 5)
-    ; VI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+    ; VI: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
+    ; VI: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C14]](s32)
     ; VI: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 4, addrspace 5)
-    ; VI: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
-    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<4 x s32>), [[BUILD_VECTOR1]](<4 x s32>), [[BUILD_VECTOR2]](<4 x s32>), [[BUILD_VECTOR3]](<4 x s32>)
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
     ; GFX9-LABEL: name: test_load_private_v16s32_align32
     ; GFX9: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, align 32, addrspace 5)
@@ -8249,39 +8308,44 @@ body: |
     ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
     ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
     ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
     ; GFX9: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD3]](p5) :: (load 4, align 16, addrspace 5)
-    ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
+    ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+    ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
     ; GFX9: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD4]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s32)
+    ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
     ; GFX9: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD5]](p5) :: (load 4, align 8, addrspace 5)
-    ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s32)
+    ; GFX9: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
+    ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C6]](s32)
     ; GFX9: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD6]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32)
-    ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
-    ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
+    ; GFX9: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C7]](s32)
     ; GFX9: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD7]](p5) :: (load 4, align 32, addrspace 5)
-    ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C]](s32)
+    ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
+    ; GFX9: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C8]](s32)
     ; GFX9: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD8]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s32)
+    ; GFX9: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; GFX9: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C9]](s32)
     ; GFX9: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD9]](p5) :: (load 4, align 8, addrspace 5)
-    ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s32)
+    ; GFX9: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
+    ; GFX9: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C10]](s32)
     ; GFX9: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD10]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32)
-    ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
-    ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
+    ; GFX9: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
+    ; GFX9: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C11]](s32)
     ; GFX9: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD11]](p5) :: (load 4, align 16, addrspace 5)
-    ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C]](s32)
+    ; GFX9: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
+    ; GFX9: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C12]](s32)
     ; GFX9: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD12]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s32)
+    ; GFX9: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
+    ; GFX9: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C13]](s32)
     ; GFX9: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD13]](p5) :: (load 4, align 8, addrspace 5)
-    ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s32)
+    ; GFX9: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
+    ; GFX9: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C14]](s32)
     ; GFX9: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD14]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
-    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<4 x s32>), [[BUILD_VECTOR1]](<4 x s32>), [[BUILD_VECTOR2]](<4 x s32>), [[BUILD_VECTOR3]](<4 x s32>)
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
+    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32), [[LOAD4]](s32), [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32), [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32)
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[BUILD_VECTOR]](<16 x s32>)
     %0:_(p5) = COPY $vgpr0
     %1:_(<16 x s32>) = G_LOAD %0 :: (load 16, align 32, addrspace 5)
     $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY %1
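
The private (addrspace 5) load hunks above all reduce to one shape: an over-wide load is still broken into dword G_LOADs, but with this patch each piece's byte offset is materialized as a fresh G_CONSTANT applied to the original base pointer rather than a G_PTR_ADD chained off the previous intermediate pointer, and a single wide G_BUILD_VECTOR collects the results. A minimal two-dword sketch of that shape, illustrative only and not a test from the commit:

    ---
    name: split_private_load_sketch
    body: |
      bb.0:
        liveins: $vgpr0
        %0:_(p5) = COPY $vgpr0
        ; piece 0 loads directly from the base pointer
        %1:_(s32) = G_LOAD %0 :: (load 4, align 8, addrspace 5)
        ; piece 1: the offset constant is applied to %0 itself,
        ; not to an intermediate pointer from an earlier piece
        %2:_(s32) = G_CONSTANT i32 4
        %3:_(p5) = G_PTR_ADD %0, %2(s32)
        %4:_(s32) = G_LOAD %3 :: (load 4, addrspace 5)
        ; one wide build_vector instead of per-pair vectors plus a concat
        %5:_(<2 x s32>) = G_BUILD_VECTOR %1(s32), %4(s32)
    ...
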
@@ -9999,60 +10063,56 @@ body: |
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4, addrspace 5)
-    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; SI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4, addrspace 5)
-    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+    ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; SI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD2]](s32), [[LOAD3]](s32)
-    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s32>), [[BUILD_VECTOR1]](<2 x s32>)
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
+    ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
     ; CI-LABEL: name: test_extload_private_v4s32_from_8_align4
     ; CI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5)
     ; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4, addrspace 5)
-    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
     ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; CI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; CI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4, addrspace 5)
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+    ; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; CI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD2]](s32), [[LOAD3]](s32)
-    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s32>), [[BUILD_VECTOR1]](<2 x s32>)
-    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
+    ; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
+    ; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
     ; VI-LABEL: name: test_extload_private_v4s32_from_8_align4
     ; VI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5)
     ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4, addrspace 5)
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
     ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; VI: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4, addrspace 5)
-    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+    ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; VI: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD2]](s32), [[LOAD3]](s32)
-    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s32>), [[BUILD_VECTOR1]](<2 x s32>)
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
     ; GFX9-LABEL: name: test_extload_private_v4s32_from_8_align4
     ; GFX9: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
     ; GFX9: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (load 4, addrspace 5)
     ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
     ; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
     ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
     ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
     ; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
+    ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C2]](s32)
     ; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p5) :: (load 4, addrspace 5)
-    ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD2]](s32), [[LOAD3]](s32)
-    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s32>), [[BUILD_VECTOR1]](<2 x s32>)
-    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
+    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[LOAD3]](s32)
+    ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
     %0:_(p5) = COPY $vgpr0
     %1:_(<4 x s32>) = G_LOAD %0 :: (load 8, align 4, addrspace 5)
     $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
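
The store hunks in the next file lean heavily on G_EXTRACT pairs at offsets 0 and 128. The trailing G_EXTRACT operand is a bit offset, so 128 = 4 x 32 selects the fifth element of a <5 x s32> (or <5 x p3>) value, while offset 0 covers the first four elements as one <4 x s32> that can feed a single 16-byte store. A hypothetical standalone illustration of that split, not part of the commit:

    ---
    name: extract_bit_offset_sketch
    body: |
      bb.0:
        liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4
        %0:_(<5 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4
        ; bit offset 0: elements 0-3 as a single <4 x s32>
        %1:_(<4 x s32>) = G_EXTRACT %0(<5 x s32>), 0
        ; bit offset 128 (4 elements x 32 bits): the remaining scalar
        %2:_(s32) = G_EXTRACT %0(<5 x s32>), 128
    ...
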

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir
index c6364389d9e5..512e540bed73 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store-global.mir
@@ -4430,7 +4430,9 @@ body: |
     ; SI-LABEL: name: test_store_global_v5s32_align1
     ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; SI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s32>)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
@@ -4524,16 +4526,16 @@ body: |
     ; SI: G_STORE [[COPY32]](s32), [[PTR_ADD14]](p1) :: (store 1, addrspace 1)
     ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64)
-    ; SI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
+    ; SI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32)
     ; SI: [[COPY33:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; SI: [[COPY34:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
+    ; SI: [[COPY34:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32)
     ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY34]], [[C2]]
     ; SI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[AND8]], [[COPY33]](s32)
     ; SI: [[COPY35:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
     ; SI: [[COPY36:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32)
     ; SI: [[AND9:%[0-9]+]]:_(s32) = G_AND [[COPY36]], [[C2]]
     ; SI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[AND9]], [[COPY35]](s32)
-    ; SI: [[COPY37:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
+    ; SI: [[COPY37:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32)
     ; SI: G_STORE [[COPY37]](s32), [[PTR_ADD15]](p1) :: (store 1, addrspace 1)
     ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
     ; SI: [[COPY38:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32)
@@ -4547,24 +4549,18 @@ body: |
     ; CI-LABEL: name: test_store_global_v5s32_align1
     ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; CI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 1, addrspace 1)
+    ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 1, addrspace 1)
     ; VI-LABEL: name: test_store_global_v5s32_align1
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s32>)
     ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
     ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
@@ -4642,12 +4638,12 @@ body: |
     ; VI: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store 1, addrspace 1)
     ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
-    ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[UV4]](s32)
-    ; VI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
+    ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[EXTRACT1]](s32)
+    ; VI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32)
     ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR12]](s32)
     ; VI: [[LSHR13:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC8]], [[C1]](s16)
     ; VI: [[LSHR14:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC9]], [[C1]](s16)
-    ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
+    ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32)
     ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD15]](p1) :: (store 1, addrspace 1)
     ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
     ; VI: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR13]](s16)
@@ -4661,20 +4657,12 @@ body: |
     ; GFX9-LABEL: name: test_store_global_v5s32_align1
     ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; GFX9: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; GFX9: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; GFX9: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 1, addrspace 1)
+    ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 1, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
     G_STORE %1, %0 :: (store 20, align 1, addrspace 1)
@@ -4689,10 +4677,13 @@ body: |
     ; SI-LABEL: name: test_store_global_v5s32_align2
     ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; SI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; SI: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s32>)
+    ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](<2 x s32>)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
-    ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
+    ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32)
+    ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
     ; SI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
     ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
@@ -4700,60 +4691,578 @@ body: |
     ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2, addrspace 1)
     ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
     ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32)
-    ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
+    ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32)
+    ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
     ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
     ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
     ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2, addrspace 1)
     ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32)
-    ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
+    ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>)
+    ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
+    ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
     ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
     ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
     ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2, addrspace 1)
-    ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32)
-    ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
+    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+    ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32)
+    ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32)
     ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
     ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
     ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2, addrspace 1)
-    ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
-    ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
+    ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+    ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32)
+    ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32)
+    ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2, addrspace 1)
+    ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+    ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
+    ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2, addrspace 1)
+    ; CI-LABEL: name: test_store_global_v5s32_align2
+    ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 2, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v5s32_align2
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; VI: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s32>)
+    ; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](<2 x s32>)
+    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32)
+    ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
+    ; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
+    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+    ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+    ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2, addrspace 1)
+    ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+    ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32)
+    ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
+    ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2, addrspace 1)
+    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+    ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+    ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2, addrspace 1)
+    ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+    ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>)
+    ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
+    ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
+    ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2, addrspace 1)
+    ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+    ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+    ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2, addrspace 1)
+    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+    ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32)
+    ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV5]](s32)
+    ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2, addrspace 1)
+    ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+    ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+    ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2, addrspace 1)
+    ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+    ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32)
+    ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32)
+    ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2, addrspace 1)
+    ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
+    ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
+    ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2, addrspace 1)
+    ; GFX9-LABEL: name: test_store_global_v5s32_align2
+    ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 2, addrspace 1)
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    G_STORE %1, %0 :: (store 20, align 2, addrspace 1)
+...
+
+---
+name: test_store_global_v5s32_align4
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+
+    ; SI-LABEL: name: test_store_global_v5s32_align4
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
+    ; CI-LABEL: name: test_store_global_v5s32_align4
+    ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v5s32_align4
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
+    ; GFX9-LABEL: name: test_store_global_v5s32_align4
+    ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    G_STORE %1, %0 :: (store 20, align 4, addrspace 1)
+...
+
+---
+name: test_store_global_v5s32_align8
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+
+    ; SI-LABEL: name: test_store_global_v5s32_align8
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 8, addrspace 1)
+    ; CI-LABEL: name: test_store_global_v5s32_align8
+    ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 8, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v5s32_align8
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 8, addrspace 1)
+    ; GFX9-LABEL: name: test_store_global_v5s32_align8
+    ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 8, addrspace 1)
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    G_STORE %1, %0 :: (store 20, align 8, addrspace 1)
+...
+
+---
+name: test_store_global_v5s32_align16
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+
+    ; SI-LABEL: name: test_store_global_v5s32_align16
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
+    ; CI-LABEL: name: test_store_global_v5s32_align16
+    ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v5s32_align16
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
+    ; GFX9-LABEL: name: test_store_global_v5s32_align16
+    ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[COPY1]](<5 x s32>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x s32>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    G_STORE %1, %0 :: (store 20, align 16, addrspace 1)
+...
+
+---
+name: test_store_global_v5p3_align1
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; SI-LABEL: name: test_store_global_v5p3_align1
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; SI: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3), [[UV2:%[0-9]+]]:_(p3), [[UV3:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[EXTRACT]](<4 x p3>)
+    ; SI: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3)
+    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C]](s32)
+    ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+    ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[PTRTOINT]](s32)
+    ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C2]]
+    ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[COPY2]](s32)
+    ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+    ; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C2]]
+    ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[C1]](s32)
+    ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[PTRTOINT]](s32)
+    ; SI: G_STORE [[COPY5]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
+    ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+    ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+    ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD]](p1) :: (store 1, addrspace 1)
+    ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+    ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+    ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD1]](p1) :: (store 1, addrspace 1)
+    ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+    ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+    ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD2]](p1) :: (store 1, addrspace 1)
+    ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+    ; SI: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3)
+    ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT1]], [[C]](s32)
+    ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+    ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[PTRTOINT1]](s32)
+    ; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C2]]
+    ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[COPY9]](s32)
+    ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+    ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+    ; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C2]]
+    ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[COPY11]](s32)
+    ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[PTRTOINT1]](s32)
+    ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD3]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+    ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
+    ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD4]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+    ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+    ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD5]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C5]](s64)
+    ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32)
+    ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD6]](p1) :: (store 1, addrspace 1)
+    ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+    ; SI: [[PTRTOINT2:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV2]](p3)
+    ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT2]], [[C]](s32)
+    ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+    ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[PTRTOINT2]](s32)
+    ; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C2]]
+    ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[AND4]], [[COPY17]](s32)
+    ; SI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+    ; SI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32)
+    ; SI: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY20]], [[C2]]
+    ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[AND5]], [[COPY19]](s32)
+    ; SI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[PTRTOINT2]](s32)
+    ; SI: G_STORE [[COPY21]](s32), [[PTR_ADD7]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+    ; SI: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32)
+    ; SI: G_STORE [[COPY22]](s32), [[PTR_ADD8]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+    ; SI: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32)
+    ; SI: G_STORE [[COPY23]](s32), [[PTR_ADD9]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C5]](s64)
+    ; SI: [[COPY24:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32)
+    ; SI: G_STORE [[COPY24]](s32), [[PTR_ADD10]](p1) :: (store 1, addrspace 1)
+    ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
+    ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+    ; SI: [[PTRTOINT3:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV3]](p3)
+    ; SI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT3]], [[C]](s32)
+    ; SI: [[COPY25:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+    ; SI: [[COPY26:%[0-9]+]]:_(s32) = COPY [[PTRTOINT3]](s32)
+    ; SI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY26]], [[C2]]
+    ; SI: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[AND6]], [[COPY25]](s32)
+    ; SI: [[COPY27:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+    ; SI: [[COPY28:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32)
+    ; SI: [[AND7:%[0-9]+]]:_(s32) = G_AND [[COPY28]], [[C2]]
+    ; SI: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[AND7]], [[COPY27]](s32)
+    ; SI: [[COPY29:%[0-9]+]]:_(s32) = COPY [[PTRTOINT3]](s32)
+    ; SI: G_STORE [[COPY29]](s32), [[PTR_ADD11]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+    ; SI: [[COPY30:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32)
+    ; SI: G_STORE [[COPY30]](s32), [[PTR_ADD12]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+    ; SI: [[COPY31:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32)
+    ; SI: G_STORE [[COPY31]](s32), [[PTR_ADD13]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C5]](s64)
+    ; SI: [[COPY32:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32)
+    ; SI: G_STORE [[COPY32]](s32), [[PTR_ADD14]](p1) :: (store 1, addrspace 1)
+    ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64)
+    ; SI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32)
+    ; SI: [[COPY33:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+    ; SI: [[COPY34:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32)
+    ; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY34]], [[C2]]
+    ; SI: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[AND8]], [[COPY33]](s32)
+    ; SI: [[COPY35:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+    ; SI: [[COPY36:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32)
+    ; SI: [[AND9:%[0-9]+]]:_(s32) = G_AND [[COPY36]], [[C2]]
+    ; SI: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[AND9]], [[COPY35]](s32)
+    ; SI: [[COPY37:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32)
+    ; SI: G_STORE [[COPY37]](s32), [[PTR_ADD15]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
+    ; SI: [[COPY38:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32)
+    ; SI: G_STORE [[COPY38]](s32), [[PTR_ADD16]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+    ; SI: [[COPY39:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32)
+    ; SI: G_STORE [[COPY39]](s32), [[PTR_ADD17]](p1) :: (store 1, addrspace 1)
+    ; SI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
+    ; SI: [[COPY40:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32)
+    ; SI: G_STORE [[COPY40]](s32), [[PTR_ADD18]](p1) :: (store 1, addrspace 1)
+    ; CI-LABEL: name: test_store_global_v5p3_align1
+    ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; CI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; CI: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 1, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 1, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v5p3_align1
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; VI: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3), [[UV2:%[0-9]+]]:_(p3), [[UV3:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[EXTRACT]](<4 x p3>)
+    ; VI: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3)
+    ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[PTRTOINT]](s32)
+    ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C]](s32)
+    ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+    ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
+    ; VI: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C1]](s16)
+    ; VI: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C1]](s16)
+    ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[PTRTOINT]](s32)
+    ; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
+    ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+    ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
+    ; VI: G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p1) :: (store 1, addrspace 1)
+    ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+    ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+    ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store 1, addrspace 1)
+    ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+    ; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
+    ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD2]](p1) :: (store 1, addrspace 1)
+    ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+    ; VI: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3)
+    ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[PTRTOINT1]](s32)
+    ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT1]], [[C]](s32)
+    ; VI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+    ; VI: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C1]](s16)
+    ; VI: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C1]](s16)
+    ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[PTRTOINT1]](s32)
+    ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+    ; VI: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
+    ; VI: G_STORE [[ANYEXT2]](s32), [[PTR_ADD4]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C3]](s64)
+    ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+    ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD5]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C4]](s64)
+    ; VI: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
+    ; VI: G_STORE [[ANYEXT3]](s32), [[PTR_ADD6]](p1) :: (store 1, addrspace 1)
+    ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
+    ; VI: [[PTRTOINT2:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV2]](p3)
+    ; VI: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[PTRTOINT2]](s32)
+    ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT2]], [[C]](s32)
+    ; VI: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
+    ; VI: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC4]], [[C1]](s16)
+    ; VI: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC5]], [[C1]](s16)
+    ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[PTRTOINT2]](s32)
+    ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD7]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+    ; VI: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR7]](s16)
+    ; VI: G_STORE [[ANYEXT4]](s32), [[PTR_ADD8]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+    ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32)
+    ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD9]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C4]](s64)
+    ; VI: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR8]](s16)
+    ; VI: G_STORE [[ANYEXT5]](s32), [[PTR_ADD10]](p1) :: (store 1, addrspace 1)
+    ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
+    ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
+    ; VI: [[PTRTOINT3:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV3]](p3)
+    ; VI: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[PTRTOINT3]](s32)
+    ; VI: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT3]], [[C]](s32)
+    ; VI: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR9]](s32)
+    ; VI: [[LSHR10:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC6]], [[C1]](s16)
+    ; VI: [[LSHR11:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC7]], [[C1]](s16)
+    ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[PTRTOINT3]](s32)
+    ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD11]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+    ; VI: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR10]](s16)
+    ; VI: G_STORE [[ANYEXT6]](s32), [[PTR_ADD12]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C3]](s64)
+    ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32)
+    ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD13]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C4]](s64)
+    ; VI: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR11]](s16)
+    ; VI: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store 1, addrspace 1)
+    ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+    ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[EXTRACT1]](s32)
+    ; VI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32)
+    ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR12]](s32)
+    ; VI: [[LSHR13:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC8]], [[C1]](s16)
+    ; VI: [[LSHR14:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC9]], [[C1]](s16)
+    ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32)
+    ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD15]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C2]](s64)
+    ; VI: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR13]](s16)
+    ; VI: G_STORE [[ANYEXT8]](s32), [[PTR_ADD16]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C3]](s64)
+    ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32)
+    ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD17]](p1) :: (store 1, addrspace 1)
+    ; VI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
+    ; VI: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR14]](s16)
+    ; VI: G_STORE [[ANYEXT9]](s32), [[PTR_ADD18]](p1) :: (store 1, addrspace 1)
+    ; GFX9-LABEL: name: test_store_global_v5p3_align1
+    ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 1, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 1, addrspace 1)
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    G_STORE %1, %0 :: (store 20, align 1, addrspace 1)
+...
+
+---
+name: test_store_global_v5p3_align2
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+
+    ; SI-LABEL: name: test_store_global_v5p3_align2
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; SI: [[UV:%[0-9]+]]:_(<2 x p3>), [[UV1:%[0-9]+]]:_(<2 x p3>) = G_UNMERGE_VALUES [[EXTRACT]](<4 x p3>)
+    ; SI: [[UV2:%[0-9]+]]:_(p3), [[UV3:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[UV]](<2 x p3>)
+    ; SI: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV2]](p3)
+    ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C]](s32)
+    ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[PTRTOINT]](s32)
+    ; SI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
+    ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+    ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+    ; SI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2, addrspace 1)
+    ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+    ; SI: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV3]](p3)
+    ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT1]], [[C]](s32)
+    ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[PTRTOINT1]](s32)
+    ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2, addrspace 1)
+    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
+    ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+    ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2, addrspace 1)
+    ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+    ; SI: [[UV4:%[0-9]+]]:_(p3), [[UV5:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[UV1]](<2 x p3>)
+    ; SI: [[PTRTOINT2:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV4]](p3)
+    ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT2]], [[C]](s32)
+    ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[PTRTOINT2]](s32)
+    ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2, addrspace 1)
+    ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
+    ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+    ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2, addrspace 1)
+    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+    ; SI: [[PTRTOINT3:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV5]](p3)
+    ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT3]], [[C]](s32)
+    ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[PTRTOINT3]](s32)
+    ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2, addrspace 1)
+    ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
+    ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+    ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2, addrspace 1)
+    ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+    ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32)
+    ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32)
     ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
     ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
     ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2, addrspace 1)
-    ; CI-LABEL: name: test_store_global_v5s32_align2
+    ; CI-LABEL: name: test_store_global_v5p3_align2
     ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; CI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; CI: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 2, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 2, addrspace 1)
-    ; VI-LABEL: name: test_store_global_v5s32_align2
+    ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 2, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v5p3_align2
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
+    ; VI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; VI: [[UV:%[0-9]+]]:_(<2 x p3>), [[UV1:%[0-9]+]]:_(<2 x p3>) = G_UNMERGE_VALUES [[EXTRACT]](<4 x p3>)
+    ; VI: [[UV2:%[0-9]+]]:_(p3), [[UV3:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[UV]](<2 x p3>)
+    ; VI: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV2]](p3)
     ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
-    ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
+    ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C]](s32)
+    ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[PTRTOINT]](s32)
     ; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
     ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
@@ -4761,292 +5270,340 @@ body: |
     ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store 2, addrspace 1)
     ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
     ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32)
-    ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
+    ; VI: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV3]](p3)
+    ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT1]], [[C]](s32)
+    ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[PTRTOINT1]](s32)
     ; VI: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
     ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
     ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p1) :: (store 2, addrspace 1)
     ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32)
-    ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
+    ; VI: [[UV4:%[0-9]+]]:_(p3), [[UV5:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[UV1]](<2 x p3>)
+    ; VI: [[PTRTOINT2:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV4]](p3)
+    ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT2]], [[C]](s32)
+    ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[PTRTOINT2]](s32)
     ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
     ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
     ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p1) :: (store 2, addrspace 1)
-    ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32)
-    ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
+    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+    ; VI: [[PTRTOINT3:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV5]](p3)
+    ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT3]], [[C]](s32)
+    ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[PTRTOINT3]](s32)
     ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD5]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
     ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
     ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD6]](p1) :: (store 2, addrspace 1)
-    ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
-    ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
+    ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+    ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT1]], [[C]](s32)
+    ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT1]](s32)
     ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD7]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
     ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
     ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD8]](p1) :: (store 2, addrspace 1)
-    ; GFX9-LABEL: name: test_store_global_v5s32_align2
+    ; GFX9-LABEL: name: test_store_global_v5p3_align2
     ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; GFX9: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 2, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; GFX9: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; GFX9: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 2, addrspace 1)
+    ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 2, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
-    %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    %1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
     G_STORE %1, %0 :: (store 20, align 2, addrspace 1)
 ...
 
 ---
-name: test_store_global_v5s32_align4
+name: test_store_global_v5p3_align4
 body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
 
-    ; SI-LABEL: name: test_store_global_v5s32_align4
+    ; SI-LABEL: name: test_store_global_v5p3_align4
     ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; SI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; SI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; SI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; SI: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; SI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; SI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; SI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; SI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, addrspace 1)
-    ; CI-LABEL: name: test_store_global_v5s32_align4
+    ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
+    ; CI-LABEL: name: test_store_global_v5p3_align4
     ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; CI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; CI: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, addrspace 1)
-    ; VI-LABEL: name: test_store_global_v5s32_align4
+    ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v5p3_align4
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; VI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; VI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; VI: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; VI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; VI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; VI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; VI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, addrspace 1)
-    ; GFX9-LABEL: name: test_store_global_v5s32_align4
+    ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
+    ; GFX9-LABEL: name: test_store_global_v5p3_align4
     ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; GFX9: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; GFX9: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; GFX9: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, addrspace 1)
+    ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
-    %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    %1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
     G_STORE %1, %0 :: (store 20, align 4, addrspace 1)
 ...
 
 ---
-name: test_store_global_v5s32_align8
+name: test_store_global_v5p3_align8
 body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
 
-    ; SI-LABEL: name: test_store_global_v5s32_align8
+    ; SI-LABEL: name: test_store_global_v5p3_align8
     ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; SI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; SI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 8, addrspace 1)
-    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; SI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; SI: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; SI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; SI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; SI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; SI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 8, addrspace 1)
-    ; CI-LABEL: name: test_store_global_v5s32_align8
+    ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 8, addrspace 1)
+    ; CI-LABEL: name: test_store_global_v5p3_align8
     ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; CI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 8, addrspace 1)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; CI: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 8, addrspace 1)
-    ; VI-LABEL: name: test_store_global_v5s32_align8
+    ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 8, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v5p3_align8
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; VI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 8, addrspace 1)
-    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; VI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; VI: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; VI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; VI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; VI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; VI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 8, addrspace 1)
-    ; GFX9-LABEL: name: test_store_global_v5s32_align8
+    ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 8, addrspace 1)
+    ; GFX9-LABEL: name: test_store_global_v5p3_align8
     ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; GFX9: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 8, addrspace 1)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; GFX9: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; GFX9: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 8, addrspace 1)
+    ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 8, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
-    %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    %1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
     G_STORE %1, %0 :: (store 20, align 8, addrspace 1)
 ...
 
 ---
-name: test_store_global_v5s32_align16
+name: test_store_global_v5p3_align16
 body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
 
-    ; SI-LABEL: name: test_store_global_v5s32_align16
+    ; SI-LABEL: name: test_store_global_v5p3_align16
     ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; SI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; SI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 16, addrspace 1)
-    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; SI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; SI: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; SI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; SI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; SI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; SI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 16, addrspace 1)
-    ; CI-LABEL: name: test_store_global_v5s32_align16
+    ; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
+    ; CI-LABEL: name: test_store_global_v5p3_align16
     ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; CI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 16, addrspace 1)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; CI: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 16, addrspace 1)
-    ; VI-LABEL: name: test_store_global_v5s32_align16
+    ; CI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v5p3_align16
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; VI: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; VI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 16, addrspace 1)
-    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; VI: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; VI: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; VI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; VI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; VI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; VI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 16, addrspace 1)
-    ; GFX9-LABEL: name: test_store_global_v5s32_align16
+    ; VI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
+    ; GFX9-LABEL: name: test_store_global_v5p3_align16
     ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<5 x s32>)
-    ; GFX9: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 16, addrspace 1)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; GFX9: [[COPY1:%[0-9]+]]:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x p3>) = G_EXTRACT [[COPY1]](<5 x p3>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<5 x p3>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<4 x p3>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; GFX9: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; GFX9: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 16, addrspace 1)
+    ; GFX9: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
-    %1:_(<5 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    %1:_(<5 x p3>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+    G_STORE %1, %0 :: (store 20, align 16, addrspace 1)
+...
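
Each of the v5p3 cases above follows the same breakdown: the 20-byte store is emitted as one 16-byte wide piece (<4 x p3>) plus a single-element tail at byte offset 16, with only the MMO alignment annotation varying between the tests. A minimal standalone sketch of that size arithmetic (plain C++; deliberately not the in-tree LLT/LegalizerInfo API, and all names here are illustrative only):

    #include <cassert>
    #include <cstdio>

    // Model of the wide-piece split the tests above exercise: carve one
    // piece of at most MaxBits off the front of a NumElts x EltBits
    // store, leaving the rest as a tail. This mirrors the observed test
    // output, not the exact in-tree implementation.
    struct Breakdown {
      unsigned WideElts; // elements covered by the first wide store
      unsigned TailElts; // elements left for the follow-up store
    };

    static Breakdown splitWideStore(unsigned NumElts, unsigned EltBits,
                                    unsigned MaxBits) {
      assert(MaxBits % EltBits == 0 && "model assumes evenly divisible pieces");
      unsigned WideElts = MaxBits / EltBits;
      if (WideElts >= NumElts)
        return {NumElts, 0}; // already fits in a single store
      return {WideElts, NumElts - WideElts};
    }

    int main() {
      // <5 x p3> (five 32-bit pointers) against a 128-bit max access:
      Breakdown B = splitWideStore(5, 32, 128);
      // Expect a 16-byte <4 x p3> store plus one s32-sized store at
      // byte offset 16, matching the checks above.
      std::printf("wide: <%u x p3> (store %u), tail: %u elt at offset %u\n",
                  B.WideElts, B.WideElts * 32 / 8, B.TailElts,
                  B.WideElts * 32 / 8);
    }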
+
+---
+name: test_store_global_v10s16_align4
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+
+    ; SI-LABEL: name: test_store_global_v10s16_align4
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[DEF:%[0-9]+]]:_(<10 x s32>) = G_IMPLICIT_DEF
+    ; SI: [[TRUNC:%[0-9]+]]:_(<10 x s16>) = G_TRUNC [[DEF]](<10 x s32>)
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[TRUNC]](<10 x s16>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[TRUNC]](<10 x s16>), 128
+    ; SI: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; SI: G_STORE [[EXTRACT1]](<2 x s16>), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
+    ; CI-LABEL: name: test_store_global_v10s16_align4
+    ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; CI: [[DEF:%[0-9]+]]:_(<10 x s32>) = G_IMPLICIT_DEF
+    ; CI: [[TRUNC:%[0-9]+]]:_(<10 x s16>) = G_TRUNC [[DEF]](<10 x s32>)
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[TRUNC]](<10 x s16>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[TRUNC]](<10 x s16>), 128
+    ; CI: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CI: G_STORE [[EXTRACT1]](<2 x s16>), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v10s16_align4
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[DEF:%[0-9]+]]:_(<10 x s32>) = G_IMPLICIT_DEF
+    ; VI: [[TRUNC:%[0-9]+]]:_(<10 x s16>) = G_TRUNC [[DEF]](<10 x s32>)
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[TRUNC]](<10 x s16>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[TRUNC]](<10 x s16>), 128
+    ; VI: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; VI: G_STORE [[EXTRACT1]](<2 x s16>), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
+    ; GFX9-LABEL: name: test_store_global_v10s16_align4
+    ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; GFX9: [[DEF:%[0-9]+]]:_(<10 x s32>) = G_IMPLICIT_DEF
+    ; GFX9: [[TRUNC:%[0-9]+]]:_(<10 x s16>) = G_TRUNC [[DEF]](<10 x s32>)
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[TRUNC]](<10 x s16>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[TRUNC]](<10 x s16>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; GFX9: G_STORE [[EXTRACT1]](<2 x s16>), [[PTR_ADD]](p1) :: (store 4, align 16, addrspace 1)
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(<10 x s16>) = G_IMPLICIT_DEF
+    G_STORE %1, %0 :: (store 20, align 16, addrspace 1)
+...
+
+---
+name: test_store_global_v11s16_align4
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+
+    ; SI-LABEL: name: test_store_global_v11s16_align4
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[DEF:%[0-9]+]]:_(<11 x s16>) = G_IMPLICIT_DEF
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[DEF]](<11 x s16>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<11 x s16>), 128
+    ; SI: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; SI: G_STORE [[EXTRACT1]](<3 x s16>), [[PTR_ADD]](p1) :: (store 6, align 16, addrspace 1)
+    ; CI-LABEL: name: test_store_global_v11s16_align4
+    ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; CI: [[DEF:%[0-9]+]]:_(<11 x s16>) = G_IMPLICIT_DEF
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[DEF]](<11 x s16>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<11 x s16>), 128
+    ; CI: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CI: G_STORE [[EXTRACT1]](<3 x s16>), [[PTR_ADD]](p1) :: (store 6, align 16, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v11s16_align4
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[DEF:%[0-9]+]]:_(<11 x s16>) = G_IMPLICIT_DEF
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[DEF]](<11 x s16>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<11 x s16>), 128
+    ; VI: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; VI: G_STORE [[EXTRACT1]](<3 x s16>), [[PTR_ADD]](p1) :: (store 6, align 16, addrspace 1)
+    ; GFX9-LABEL: name: test_store_global_v11s16_align4
+    ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; GFX9: [[DEF:%[0-9]+]]:_(<11 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[DEF]](<11 x s16>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<11 x s16>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; GFX9: G_STORE [[EXTRACT1]](<3 x s16>), [[PTR_ADD]](p1) :: (store 6, align 16, addrspace 1)
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(<11 x s16>) = G_IMPLICIT_DEF
+    G_STORE %1, %0 :: (store 20, align 16, addrspace 1)
+...
+
+---
+name: test_store_global_v12s16_align4
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
+
+    ; SI-LABEL: name: test_store_global_v12s16_align4
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[DEF:%[0-9]+]]:_(<12 x s32>) = G_IMPLICIT_DEF
+    ; SI: [[TRUNC:%[0-9]+]]:_(<12 x s16>) = G_TRUNC [[DEF]](<12 x s32>)
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[TRUNC]](<12 x s16>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[TRUNC]](<12 x s16>), 128
+    ; SI: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; SI: G_STORE [[EXTRACT1]](<4 x s16>), [[PTR_ADD]](p1) :: (store 8, align 16, addrspace 1)
+    ; CI-LABEL: name: test_store_global_v12s16_align4
+    ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; CI: [[DEF:%[0-9]+]]:_(<12 x s32>) = G_IMPLICIT_DEF
+    ; CI: [[TRUNC:%[0-9]+]]:_(<12 x s16>) = G_TRUNC [[DEF]](<12 x s32>)
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[TRUNC]](<12 x s16>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[TRUNC]](<12 x s16>), 128
+    ; CI: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CI: G_STORE [[EXTRACT1]](<4 x s16>), [[PTR_ADD]](p1) :: (store 8, align 16, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v12s16_align4
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[DEF:%[0-9]+]]:_(<12 x s32>) = G_IMPLICIT_DEF
+    ; VI: [[TRUNC:%[0-9]+]]:_(<12 x s16>) = G_TRUNC [[DEF]](<12 x s32>)
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[TRUNC]](<12 x s16>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[TRUNC]](<12 x s16>), 128
+    ; VI: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; VI: G_STORE [[EXTRACT1]](<4 x s16>), [[PTR_ADD]](p1) :: (store 8, align 16, addrspace 1)
+    ; GFX9-LABEL: name: test_store_global_v12s16_align4
+    ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; GFX9: [[DEF:%[0-9]+]]:_(<12 x s32>) = G_IMPLICIT_DEF
+    ; GFX9: [[TRUNC:%[0-9]+]]:_(<12 x s16>) = G_TRUNC [[DEF]](<12 x s32>)
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<8 x s16>) = G_EXTRACT [[TRUNC]](<12 x s16>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[TRUNC]](<12 x s16>), 128
+    ; GFX9: G_STORE [[EXTRACT]](<8 x s16>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; GFX9: G_STORE [[EXTRACT1]](<4 x s16>), [[PTR_ADD]](p1) :: (store 8, align 16, addrspace 1)
+    %0:_(p1) = COPY $vgpr0_vgpr1
+    %1:_(<12 x s16>) = G_IMPLICIT_DEF
     G_STORE %1, %0 :: (store 20, align 16, addrspace 1)
 ...
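
The v10s16/v11s16/v12s16 cases apply the same split to 16-bit elements: the first 128 bits always go out as a 16-byte <8 x s16> store, and the tail becomes <2 x s16>, <3 x s16>, or <4 x s16> (stores of 4, 6, and 8 bytes) respectively. Reusing the illustrative splitWideStore() from the sketch above:

      // 16-bit elements against the same assumed 128-bit max access.
      const unsigned Ns[] = {10, 11, 12};
      for (unsigned N : Ns) {
        Breakdown B = splitWideStore(N, 16, 128);
        // 10 -> <8 x s16> + <2 x s16>; 11 -> + <3 x s16>; 12 -> + <4 x s16>,
        // i.e. a 16-byte store followed by a 4/6/8 byte store, as checked above.
        std::printf("<%u x s16>: wide <%u x s16>, tail <%u x s16> (store %u)\n",
                    N, B.WideElts, B.TailElts, B.TailElts * 16 / 8);
      }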
 
@@ -7008,9 +7565,11 @@ body: |
     ; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; SI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; SI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; SI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
+    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; SI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s32>)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
     ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
@@ -7104,6 +7663,7 @@ body: |
     ; SI: G_STORE [[COPY34]](s32), [[PTR_ADD14]](p1) :: (store 1, addrspace 1)
     ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64)
+    ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT1]](<4 x s32>)
     ; SI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
     ; SI: [[COPY35:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
     ; SI: [[COPY36:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
@@ -7124,8 +7684,7 @@ body: |
     ; SI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
     ; SI: [[COPY42:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32)
     ; SI: G_STORE [[COPY42]](s32), [[PTR_ADD18]](p1) :: (store 1, addrspace 1)
-    ; SI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; SI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64)
+    ; SI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
     ; SI: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32)
     ; SI: [[COPY43:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
     ; SI: [[COPY44:%[0-9]+]]:_(s32) = COPY [[UV5]](s32)
@@ -7146,8 +7705,7 @@ body: |
     ; SI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C5]](s64)
     ; SI: [[COPY50:%[0-9]+]]:_(s32) = COPY [[LSHR17]](s32)
     ; SI: G_STORE [[COPY50]](s32), [[PTR_ADD22]](p1) :: (store 1, addrspace 1)
-    ; SI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; SI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64)
+    ; SI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C7]](s64)
     ; SI: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C]](s32)
     ; SI: [[COPY51:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
     ; SI: [[COPY52:%[0-9]+]]:_(s32) = COPY [[UV6]](s32)
@@ -7168,8 +7726,7 @@ body: |
     ; SI: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C5]](s64)
     ; SI: [[COPY58:%[0-9]+]]:_(s32) = COPY [[LSHR20]](s32)
     ; SI: G_STORE [[COPY58]](s32), [[PTR_ADD26]](p1) :: (store 1, addrspace 1)
-    ; SI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; SI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C12]](s64)
+    ; SI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C8]](s64)
     ; SI: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32)
     ; SI: [[COPY59:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
     ; SI: [[COPY60:%[0-9]+]]:_(s32) = COPY [[UV7]](s32)
@@ -7190,18 +7747,18 @@ body: |
     ; SI: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C5]](s64)
     ; SI: [[COPY66:%[0-9]+]]:_(s32) = COPY [[LSHR23]](s32)
     ; SI: G_STORE [[COPY66]](s32), [[PTR_ADD30]](p1) :: (store 1, addrspace 1)
-    ; SI: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; SI: [[PTR_ADD31:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C13]](s64)
-    ; SI: [[LSHR24:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32)
+    ; SI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; SI: [[PTR_ADD31:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64)
+    ; SI: [[LSHR24:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C]](s32)
     ; SI: [[COPY67:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; SI: [[COPY68:%[0-9]+]]:_(s32) = COPY [[UV8]](s32)
+    ; SI: [[COPY68:%[0-9]+]]:_(s32) = COPY [[EXTRACT2]](s32)
     ; SI: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY68]], [[C2]]
     ; SI: [[LSHR25:%[0-9]+]]:_(s32) = G_LSHR [[AND16]], [[COPY67]](s32)
     ; SI: [[COPY69:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
     ; SI: [[COPY70:%[0-9]+]]:_(s32) = COPY [[LSHR24]](s32)
     ; SI: [[AND17:%[0-9]+]]:_(s32) = G_AND [[COPY70]], [[C2]]
     ; SI: [[LSHR26:%[0-9]+]]:_(s32) = G_LSHR [[AND17]], [[COPY69]](s32)
-    ; SI: [[COPY71:%[0-9]+]]:_(s32) = COPY [[UV8]](s32)
+    ; SI: [[COPY71:%[0-9]+]]:_(s32) = COPY [[EXTRACT2]](s32)
     ; SI: G_STORE [[COPY71]](s32), [[PTR_ADD31]](p1) :: (store 1, addrspace 1)
     ; SI: [[PTR_ADD32:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C3]](s64)
     ; SI: [[COPY72:%[0-9]+]]:_(s32) = COPY [[LSHR25]](s32)
@@ -7217,42 +7774,27 @@ body: |
     ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; CI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; CI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; CI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; CI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; CI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; CI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 1, addrspace 1)
+    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; CI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; CI: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; CI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; CI: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; CI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; CI: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, align 1, addrspace 1)
-    ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; CI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; CI: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 1, addrspace 1)
+    ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 1, addrspace 1)
     ; VI-LABEL: name: test_store_global_v9s32_align1
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; VI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; VI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; VI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
+    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; VI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s32>)
     ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
     ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
@@ -7330,6 +7872,7 @@ body: |
     ; VI: G_STORE [[ANYEXT7]](s32), [[PTR_ADD14]](p1) :: (store 1, addrspace 1)
     ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
+    ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT1]](<4 x s32>)
     ; VI: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[UV4]](s32)
     ; VI: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
     ; VI: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR12]](s32)
@@ -7346,8 +7889,7 @@ body: |
     ; VI: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C4]](s64)
     ; VI: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR14]](s16)
     ; VI: G_STORE [[ANYEXT9]](s32), [[PTR_ADD18]](p1) :: (store 1, addrspace 1)
-    ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; VI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64)
+    ; VI: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C5]](s64)
     ; VI: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[UV5]](s32)
     ; VI: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32)
     ; VI: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR15]](s32)
@@ -7364,8 +7906,7 @@ body: |
     ; VI: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD19]], [[C4]](s64)
     ; VI: [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR17]](s16)
     ; VI: G_STORE [[ANYEXT11]](s32), [[PTR_ADD22]](p1) :: (store 1, addrspace 1)
-    ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; VI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C10]](s64)
+    ; VI: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C6]](s64)
     ; VI: [[TRUNC12:%[0-9]+]]:_(s16) = G_TRUNC [[UV6]](s32)
     ; VI: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C]](s32)
     ; VI: [[TRUNC13:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR18]](s32)
@@ -7382,8 +7923,7 @@ body: |
     ; VI: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD23]], [[C4]](s64)
     ; VI: [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR20]](s16)
     ; VI: G_STORE [[ANYEXT13]](s32), [[PTR_ADD26]](p1) :: (store 1, addrspace 1)
-    ; VI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; VI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C11]](s64)
+    ; VI: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C7]](s64)
     ; VI: [[TRUNC14:%[0-9]+]]:_(s16) = G_TRUNC [[UV7]](s32)
     ; VI: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32)
     ; VI: [[TRUNC15:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR21]](s32)
@@ -7400,14 +7940,14 @@ body: |
     ; VI: [[PTR_ADD30:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD27]], [[C4]](s64)
     ; VI: [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR23]](s16)
     ; VI: G_STORE [[ANYEXT15]](s32), [[PTR_ADD30]](p1) :: (store 1, addrspace 1)
-    ; VI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; VI: [[PTR_ADD31:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C12]](s64)
-    ; VI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[UV8]](s32)
-    ; VI: [[LSHR24:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32)
+    ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; VI: [[PTR_ADD31:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64)
+    ; VI: [[TRUNC16:%[0-9]+]]:_(s16) = G_TRUNC [[EXTRACT2]](s32)
+    ; VI: [[LSHR24:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C]](s32)
     ; VI: [[TRUNC17:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR24]](s32)
     ; VI: [[LSHR25:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC16]], [[C1]](s16)
     ; VI: [[LSHR26:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC17]], [[C1]](s16)
-    ; VI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[UV8]](s32)
+    ; VI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[EXTRACT2]](s32)
     ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD31]](p1) :: (store 1, addrspace 1)
     ; VI: [[PTR_ADD32:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD31]], [[C2]](s64)
     ; VI: [[ANYEXT16:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR25]](s16)
@@ -7423,34 +7963,17 @@ body: |
     ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; GFX9: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; GFX9: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; GFX9: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; GFX9: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; GFX9: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 1, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 1, addrspace 1)
+    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; GFX9: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; GFX9: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; GFX9: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; GFX9: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; GFX9: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, align 1, addrspace 1)
-    ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; GFX9: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 1, addrspace 1)
+    ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 1, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     %2:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
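
For the v9s32 cases the three <3 x s32> sources are first glued into a <9 x s32> with G_CONCAT_VECTORS, and the wide split then applies more than once: two <4 x s32> pieces at byte offsets 0 and 16, plus a final s32 at offset 32 (the G_EXTRACT at bit offset 64 of the last source vector). In terms of the hypothetical model above, iterating the split until nothing remains reproduces those offsets:

      unsigned Remaining = 9, Offset = 0;
      while (Remaining) {
        Breakdown B = splitWideStore(Remaining, 32, 128);
        // Prints 4 x s32 at 0, 4 x s32 at 16, then 1 x s32 at 32.
        std::printf("piece: %u x s32 at byte offset %u\n", B.WideElts, Offset);
        Offset += B.WideElts * 32 / 8;
        Remaining = B.TailElts;
      }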
@@ -7470,12 +7993,15 @@ body: |
     ; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; SI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; SI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; SI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
+    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; SI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; SI: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s32>)
+    ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](<2 x s32>)
     ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
-    ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
+    ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32)
+    ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
     ; SI: G_STORE [[COPY4]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
     ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
@@ -7483,64 +8009,64 @@ body: |
     ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 2, addrspace 1)
     ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
     ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32)
-    ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
+    ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32)
+    ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
     ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
     ; SI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
     ; SI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 2, addrspace 1)
     ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32)
-    ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
+    ; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>)
+    ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
+    ; SI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
     ; SI: G_STORE [[COPY8]](s32), [[PTR_ADD3]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
     ; SI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
     ; SI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 2, addrspace 1)
-    ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32)
-    ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
+    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+    ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32)
+    ; SI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV5]](s32)
     ; SI: G_STORE [[COPY10]](s32), [[PTR_ADD5]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
     ; SI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
     ; SI: G_STORE [[COPY11]](s32), [[PTR_ADD6]](p1) :: (store 2, addrspace 1)
-    ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
-    ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
+    ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+    ; SI: [[UV6:%[0-9]+]]:_(<2 x s32>), [[UV7:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[EXTRACT1]](<4 x s32>)
+    ; SI: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV6]](<2 x s32>)
+    ; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32)
+    ; SI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV8]](s32)
     ; SI: G_STORE [[COPY12]](s32), [[PTR_ADD7]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
     ; SI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
     ; SI: G_STORE [[COPY13]](s32), [[PTR_ADD8]](p1) :: (store 2, addrspace 1)
-    ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32)
-    ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV5]](s32)
+    ; SI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+    ; SI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C]](s32)
+    ; SI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV9]](s32)
     ; SI: G_STORE [[COPY14]](s32), [[PTR_ADD9]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
     ; SI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32)
     ; SI: G_STORE [[COPY15]](s32), [[PTR_ADD10]](p1) :: (store 2, addrspace 1)
-    ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C]](s32)
-    ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV6]](s32)
+    ; SI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+    ; SI: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV7]](<2 x s32>)
+    ; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV10]], [[C]](s32)
+    ; SI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV10]](s32)
     ; SI: G_STORE [[COPY16]](s32), [[PTR_ADD11]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
     ; SI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32)
     ; SI: G_STORE [[COPY17]](s32), [[PTR_ADD12]](p1) :: (store 2, addrspace 1)
-    ; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
-    ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32)
-    ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[UV7]](s32)
+    ; SI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+    ; SI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV11]], [[C]](s32)
+    ; SI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[UV11]](s32)
     ; SI: G_STORE [[COPY18]](s32), [[PTR_ADD13]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
     ; SI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32)
     ; SI: G_STORE [[COPY19]](s32), [[PTR_ADD14]](p1) :: (store 2, addrspace 1)
-    ; SI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64)
-    ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32)
-    ; SI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[UV8]](s32)
+    ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; SI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+    ; SI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C]](s32)
+    ; SI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[EXTRACT2]](s32)
     ; SI: G_STORE [[COPY20]](s32), [[PTR_ADD15]](p1) :: (store 2, addrspace 1)
     ; SI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
     ; SI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32)
@@ -7550,45 +8076,31 @@ body: |
     ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; CI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; CI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; CI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; CI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; CI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; CI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 2, addrspace 1)
+    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; CI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; CI: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; CI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; CI: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; CI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; CI: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, align 2, addrspace 1)
-    ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; CI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; CI: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 2, addrspace 1)
+    ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 2, addrspace 1)
     ; VI-LABEL: name: test_store_global_v9s32_align2
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; VI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; VI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; VI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
+    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; VI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; VI: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s32>)
+    ; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](<2 x s32>)
     ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
-    ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
+    ; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32)
+    ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
     ; VI: G_STORE [[COPY4]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
     ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
@@ -7596,64 +8108,64 @@ body: |
     ; VI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store 2, addrspace 1)
     ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
     ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32)
-    ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
+    ; VI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32)
+    ; VI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
     ; VI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
     ; VI: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
     ; VI: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p1) :: (store 2, addrspace 1)
     ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32)
-    ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
+    ; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](<2 x s32>)
+    ; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
+    ; VI: [[COPY8:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
     ; VI: G_STORE [[COPY8]](s32), [[PTR_ADD3]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C1]](s64)
     ; VI: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
     ; VI: G_STORE [[COPY9]](s32), [[PTR_ADD4]](p1) :: (store 2, addrspace 1)
-    ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV3]], [[C]](s32)
-    ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV3]](s32)
+    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD3]], [[C2]](s64)
+    ; VI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32)
+    ; VI: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV5]](s32)
     ; VI: G_STORE [[COPY10]](s32), [[PTR_ADD5]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD5]], [[C1]](s64)
     ; VI: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
     ; VI: G_STORE [[COPY11]](s32), [[PTR_ADD6]](p1) :: (store 2, addrspace 1)
-    ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV4]], [[C]](s32)
-    ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV4]](s32)
+    ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
+    ; VI: [[UV6:%[0-9]+]]:_(<2 x s32>), [[UV7:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[EXTRACT1]](<4 x s32>)
+    ; VI: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV6]](<2 x s32>)
+    ; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32)
+    ; VI: [[COPY12:%[0-9]+]]:_(s32) = COPY [[UV8]](s32)
     ; VI: G_STORE [[COPY12]](s32), [[PTR_ADD7]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C1]](s64)
     ; VI: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
     ; VI: G_STORE [[COPY13]](s32), [[PTR_ADD8]](p1) :: (store 2, addrspace 1)
-    ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV5]], [[C]](s32)
-    ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV5]](s32)
+    ; VI: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C2]](s64)
+    ; VI: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV9]], [[C]](s32)
+    ; VI: [[COPY14:%[0-9]+]]:_(s32) = COPY [[UV9]](s32)
     ; VI: G_STORE [[COPY14]](s32), [[PTR_ADD9]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD9]], [[C1]](s64)
     ; VI: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32)
     ; VI: G_STORE [[COPY15]](s32), [[PTR_ADD10]](p1) :: (store 2, addrspace 1)
-    ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV6]], [[C]](s32)
-    ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV6]](s32)
+    ; VI: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD7]], [[C3]](s64)
+    ; VI: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV7]](<2 x s32>)
+    ; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV10]], [[C]](s32)
+    ; VI: [[COPY16:%[0-9]+]]:_(s32) = COPY [[UV10]](s32)
     ; VI: G_STORE [[COPY16]](s32), [[PTR_ADD11]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C1]](s64)
     ; VI: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32)
     ; VI: G_STORE [[COPY17]](s32), [[PTR_ADD12]](p1) :: (store 2, addrspace 1)
-    ; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C8]](s64)
-    ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV7]], [[C]](s32)
-    ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[UV7]](s32)
+    ; VI: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD11]], [[C2]](s64)
+    ; VI: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV11]], [[C]](s32)
+    ; VI: [[COPY18:%[0-9]+]]:_(s32) = COPY [[UV11]](s32)
     ; VI: G_STORE [[COPY18]](s32), [[PTR_ADD13]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD13]], [[C1]](s64)
     ; VI: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32)
     ; VI: G_STORE [[COPY19]](s32), [[PTR_ADD14]](p1) :: (store 2, addrspace 1)
-    ; VI: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C9]](s64)
-    ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV8]], [[C]](s32)
-    ; VI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[UV8]](s32)
+    ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; VI: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
+    ; VI: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[EXTRACT2]], [[C]](s32)
+    ; VI: [[COPY20:%[0-9]+]]:_(s32) = COPY [[EXTRACT2]](s32)
     ; VI: G_STORE [[COPY20]](s32), [[PTR_ADD15]](p1) :: (store 2, addrspace 1)
     ; VI: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD15]], [[C1]](s64)
     ; VI: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32)
@@ -7663,34 +8175,17 @@ body: |
     ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; GFX9: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; GFX9: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; GFX9: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; GFX9: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; GFX9: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 2, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 2, addrspace 1)
+    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; GFX9: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; GFX9: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; GFX9: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; GFX9: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; GFX9: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, align 2, addrspace 1)
-    ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; GFX9: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 2, addrspace 1)
+    ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 2, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     %2:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
@@ -7710,133 +8205,65 @@ body: |
     ; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; SI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; SI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; SI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; SI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; SI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; SI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; SI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 4, addrspace 1)
+    ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; SI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; SI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; SI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; SI: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; SI: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; SI: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; SI: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, addrspace 1)
+    ; SI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
     ; CI-LABEL: name: test_store_global_v9s32_align4
     ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; CI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; CI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; CI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; CI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; CI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; CI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 4, addrspace 1)
+    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; CI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; CI: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; CI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; CI: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; CI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; CI: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; CI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; CI: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, addrspace 1)
+    ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
     ; VI-LABEL: name: test_store_global_v9s32_align4
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; VI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; VI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; VI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; VI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; VI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; VI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; VI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 4, addrspace 1)
+    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; VI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; VI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; VI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; VI: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; VI: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; VI: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; VI: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, addrspace 1)
+    ; VI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
     ; GFX9-LABEL: name: test_store_global_v9s32_align4
     ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; GFX9: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; GFX9: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; GFX9: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; GFX9: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; GFX9: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 4, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 4, addrspace 1)
+    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; GFX9: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; GFX9: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; GFX9: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; GFX9: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; GFX9: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; GFX9: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, addrspace 1)
+    ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     %2:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
@@ -7856,133 +8283,65 @@ body: |
     ; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; SI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; SI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; SI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; SI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 8, addrspace 1)
-    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; SI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; SI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; SI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 8, addrspace 1)
+    ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; SI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; SI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; SI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 8, addrspace 1)
-    ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; SI: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; SI: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 8, addrspace 1)
-    ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; SI: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; SI: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 8, addrspace 1)
+    ; SI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
     ; CI-LABEL: name: test_store_global_v9s32_align8
     ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; CI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; CI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; CI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; CI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; CI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 8, addrspace 1)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; CI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 8, addrspace 1)
+    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 8, addrspace 1)
-    ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; CI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; CI: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; CI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; CI: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 8, addrspace 1)
-    ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; CI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; CI: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; CI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; CI: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 8, addrspace 1)
+    ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
     ; VI-LABEL: name: test_store_global_v9s32_align8
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; VI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; VI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; VI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; VI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 8, addrspace 1)
-    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; VI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; VI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; VI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 8, addrspace 1)
+    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; VI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; VI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; VI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 8, addrspace 1)
-    ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; VI: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; VI: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 8, addrspace 1)
-    ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; VI: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; VI: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 8, addrspace 1)
+    ; VI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
     ; GFX9-LABEL: name: test_store_global_v9s32_align8
     ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; GFX9: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; GFX9: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; GFX9: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; GFX9: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 8, addrspace 1)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; GFX9: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, align 8, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, align 8, addrspace 1)
+    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; GFX9: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; GFX9: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 8, addrspace 1)
-    ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; GFX9: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; GFX9: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 8, addrspace 1)
-    ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; GFX9: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; GFX9: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 8, addrspace 1)
+    ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     %2:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
@@ -8002,133 +8361,65 @@ body: |
     ; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; SI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; SI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; SI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; SI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 16, addrspace 1)
-    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; SI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; SI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; SI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; SI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; SI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, addrspace 1)
+    ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; SI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; SI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; SI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; SI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 16, addrspace 1)
-    ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; SI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; SI: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; SI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; SI: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 8, addrspace 1)
-    ; SI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; SI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; SI: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; SI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; SI: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 16, addrspace 1)
+    ; SI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 16, addrspace 1)
     ; CI-LABEL: name: test_store_global_v9s32_align16
     ; CI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; CI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; CI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; CI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; CI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; CI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; CI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 16, addrspace 1)
-    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; CI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; CI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; CI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; CI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; CI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, addrspace 1)
+    ; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; CI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; CI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 16, addrspace 1)
-    ; CI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; CI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; CI: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; CI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; CI: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 8, addrspace 1)
-    ; CI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; CI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; CI: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; CI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; CI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; CI: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 16, addrspace 1)
+    ; CI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 16, addrspace 1)
     ; VI-LABEL: name: test_store_global_v9s32_align16
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; VI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; VI: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; VI: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; VI: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; VI: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 16, addrspace 1)
-    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; VI: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; VI: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; VI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; VI: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; VI: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; VI: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, addrspace 1)
+    ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; VI: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; VI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; VI: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; VI: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; VI: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 16, addrspace 1)
-    ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; VI: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; VI: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; VI: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; VI: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 8, addrspace 1)
-    ; VI: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; VI: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; VI: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; VI: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; VI: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 16, addrspace 1)
+    ; VI: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 16, addrspace 1)
     ; GFX9-LABEL: name: test_store_global_v9s32_align16
     ; GFX9: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; GFX9: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     ; GFX9: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
     ; GFX9: [[COPY3:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr7_vgpr8_vgpr9
-    ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
-    ; GFX9: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
-    ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<3 x s32>)
-    ; GFX9: G_STORE [[UV]](s32), [[COPY]](p1) :: (store 4, align 16, addrspace 1)
-    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<9 x s32>) = G_CONCAT_VECTORS [[COPY1]](<3 x s32>), [[COPY2]](<3 x s32>), [[COPY3]](<3 x s32>)
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[CONCAT_VECTORS]](<9 x s32>), 128
+    ; GFX9: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY3]](<3 x s32>), 64
+    ; GFX9: G_STORE [[EXTRACT]](<4 x s32>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; GFX9: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; GFX9: G_STORE [[UV1]](s32), [[PTR_ADD]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; GFX9: G_STORE [[EXTRACT1]](<4 x s32>), [[PTR_ADD]](p1) :: (store 16, addrspace 1)
+    ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; GFX9: G_STORE [[UV2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 8, addrspace 1)
-    ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
-    ; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; GFX9: G_STORE [[UV3]](s32), [[PTR_ADD2]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; GFX9: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; GFX9: G_STORE [[UV4]](s32), [[PTR_ADD3]](p1) :: (store 4, align 16, addrspace 1)
-    ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 20
-    ; GFX9: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; GFX9: G_STORE [[UV5]](s32), [[PTR_ADD4]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; GFX9: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; GFX9: G_STORE [[UV6]](s32), [[PTR_ADD5]](p1) :: (store 4, align 8, addrspace 1)
-    ; GFX9: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 28
-    ; GFX9: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; GFX9: G_STORE [[UV7]](s32), [[PTR_ADD6]](p1) :: (store 4, addrspace 1)
-    ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; GFX9: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; GFX9: G_STORE [[UV8]](s32), [[PTR_ADD7]](p1) :: (store 4, align 16, addrspace 1)
+    ; GFX9: G_STORE [[EXTRACT2]](s32), [[PTR_ADD1]](p1) :: (store 4, align 16, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
     %2:_(<3 x s32>) = COPY $vgpr5_vgpr6_vgpr7
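
For readers skimming the checks: every v9s32 variant above follows the same breakdown regardless of alignment, two <4 x s32> pieces at byte offsets 0 and 16 followed by a single s32 at byte offset 32 (the G_EXTRACT offsets 0, 128, and the trailing scalar at bit 256 of the concatenated value). Below is a minimal standalone sketch of that piece computation, assuming a 128-bit store limit that is a multiple of the 32-bit element size; the names (Piece, breakDown) are illustrative only, not the in-tree API, and it only handles the even-divisibility cases these tests exercise.

// Standalone sketch (not the in-tree LLVM code) of the type breakdown the
// v9s32 tests above exercise: a 288-bit value stored through a 128-bit-wide
// memory path becomes two <4 x s32> pieces plus an s32 tail.
#include <cstdio>
#include <vector>

struct Piece {
  unsigned OffsetBits; // offset of the piece within the original value
  unsigned NumElts;    // 1 means a plain scalar piece
  unsigned EltBits;
};

// Split NumElts elements of EltBits each into pieces of at most MaxBits,
// assuming MaxBits is a multiple of EltBits (true for s32 elements and a
// 128-bit limit). Wide pieces come first, leftover elements become scalars.
static std::vector<Piece> breakDown(unsigned NumElts, unsigned EltBits,
                                    unsigned MaxBits) {
  std::vector<Piece> Pieces;
  unsigned EltsPerPiece = MaxBits / EltBits;
  unsigned Offset = 0;
  while (NumElts >= EltsPerPiece) { // full-width vector pieces
    Pieces.push_back({Offset, EltsPerPiece, EltBits});
    Offset += EltsPerPiece * EltBits;
    NumElts -= EltsPerPiece;
  }
  for (; NumElts; --NumElts) {      // remaining elements as scalars
    Pieces.push_back({Offset, 1, EltBits});
    Offset += EltBits;
  }
  return Pieces;
}

int main() {
  // <9 x s32> with a 128-bit global-store limit, as in the tests above.
  for (const Piece &P : breakDown(9, 32, 128)) {
    if (P.NumElts > 1)
      std::printf("<%u x s%u> at bit %u\n", P.NumElts, P.EltBits, P.OffsetBits);
    else
      std::printf("s%u at bit %u\n", P.EltBits, P.OffsetBits);
  }
}

Running it prints "<4 x s32> at bit 0", "<4 x s32> at bit 128", and "s32 at bit 256", which lines up with the G_EXTRACT offsets and the byte-16 and byte-32 G_PTR_ADD constants in the checks above.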


        

