[llvm] 0654ff7 - [AMDGPU] Use ds_read/write_b96/b128 when possible for SDag

Mirko Brkusanin via llvm-commits llvm-commits@lists.llvm.org
Fri Aug 21 03:31:04 PDT 2020


Author: Mirko Brkusanin
Date: 2020-08-21T12:26:31+02:00
New Revision: 0654ff703d4e99423133165db63083b831efb9b6

URL: https://github.com/llvm/llvm-project/commit/0654ff703d4e99423133165db63083b831efb9b6
DIFF: https://github.com/llvm/llvm-project/commit/0654ff703d4e99423133165db63083b831efb9b6.diff

LOG: [AMDGPU] Use ds_read/write_b96/b128 when possible for SDag

Do not break down local loads and stores so ds_read/write_b96/b128 in
ISelLowering can be selected on subtargets that support them and if align
requirements allow them.

Differential Revision: https://reviews.llvm.org/D84403

Added: 
    llvm/test/CodeGen/AMDGPU/load-local.128.ll
    llvm/test/CodeGen/AMDGPU/load-local.96.ll
    llvm/test/CodeGen/AMDGPU/store-local.128.ll
    llvm/test/CodeGen/AMDGPU/store-local.96.ll

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/ds_read2.ll
    llvm/test/CodeGen/AMDGPU/ds_write2.ll
    llvm/test/CodeGen/AMDGPU/insert-subvector-unused-scratch.ll
    llvm/test/CodeGen/AMDGPU/lds-misaligned-bug.ll
    llvm/test/CodeGen/AMDGPU/load-local-f32.ll
    llvm/test/CodeGen/AMDGPU/load-local-i16.ll
    llvm/test/CodeGen/AMDGPU/load-local-i32.ll
    llvm/test/CodeGen/AMDGPU/load-local-i8.ll
    llvm/test/CodeGen/AMDGPU/store-local.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
index a0ab2873bca3..d5fca0313d75 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -667,6 +667,11 @@ class GCNSubtarget : public AMDGPUGenSubtargetInfo,
     return CIInsts && EnableDS128;
   }
 
+  /// \return If target supports ds_read/write_b96/128.
+  bool hasDS96AndDS128() const {
+    return CIInsts;
+  }
+
   /// Have v_trunc_f64, v_ceil_f64, v_rndne_f64
   bool haveRoundOpsF64() const {
     return CIInsts;

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 39c2ebcf8b5c..3b342776870f 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7951,9 +7951,12 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
       llvm_unreachable("unsupported private_element_size");
     }
   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
-    // Use ds_read_b128 if possible.
-    if (Subtarget->useDS128() && Load->getAlignment() >= 16 &&
-        MemVT.getStoreSize() == 16)
+    // Use ds_read_b128 or ds_read_b96 when possible.
+    if (Subtarget->hasDS96AndDS128() &&
+        ((Subtarget->useDS128() && MemVT.getStoreSize() == 16) ||
+         MemVT.getStoreSize() == 12) &&
+        allowsMisalignedMemoryAccessesImpl(MemVT.getSizeInBits(), AS,
+                                           Load->getAlign()))
       return SDValue();
 
     if (NumElements > 2)
@@ -8421,9 +8424,12 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
       llvm_unreachable("unsupported private_element_size");
     }
   } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
-    // Use ds_write_b128 if possible.
-    if (Subtarget->useDS128() && Store->getAlignment() >= 16 &&
-        VT.getStoreSize() == 16 && NumElements != 3)
+    // Use ds_write_b128 or ds_write_b96 when possible.
+    if (Subtarget->hasDS96AndDS128() &&
+        ((Subtarget->useDS128() && VT.getStoreSize() == 16) ||
+         (VT.getStoreSize() == 12)) &&
+        allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AS,
+                                           Store->getAlign()))
       return SDValue();
 
     if (NumElements > 2)

diff --git a/llvm/test/CodeGen/AMDGPU/ds_read2.ll b/llvm/test/CodeGen/AMDGPU/ds_read2.ll
index f42738f827fc..2454efaa5e35 100644
--- a/llvm/test/CodeGen/AMDGPU/ds_read2.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds_read2.ll
@@ -473,8 +473,9 @@ define amdgpu_kernel void @load_constant_disjoint_offsets(i32 addrspace(1)* %out
 ; GFX9-NOT: m0
 
 ; GCN-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], bar@abs32@lo{{$}}
-; GCN-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[PTR]] offset0:2 offset1:3
-; GCN-DAG: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[PTR]] offset1:1
+; CI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[PTR]] offset1:1
+; CI: ds_read2_b32 v{{\[[0-9]+:[0-9]+\]}}, [[PTR]] offset0:2 offset1:3
+; GFX9: ds_read_b128 v{{\[[0-9]+:[0-9]+\]}}, [[PTR]]
 define amdgpu_kernel void @load_misaligned64_constant_offsets(i64 addrspace(1)* %out) {
   %val0 = load i64, i64 addrspace(3)* getelementptr inbounds ([4 x i64], [4 x i64] addrspace(3)* @bar, i32 0, i32 0), align 4
   %val1 = load i64, i64 addrspace(3)* getelementptr inbounds ([4 x i64], [4 x i64] addrspace(3)* @bar, i32 0, i32 1), align 4

diff --git a/llvm/test/CodeGen/AMDGPU/ds_write2.ll b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
index 99e5ac8f5fc7..6b0ce6391ca8 100644
--- a/llvm/test/CodeGen/AMDGPU/ds_write2.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
@@ -438,8 +438,10 @@ define amdgpu_kernel void @store_constant_disjoint_offsets() {
 ; GFX9-NOT: m0
 
 ; GCN-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], bar@abs32@lo{{$}}
-; GCN-DAG: ds_write2_b32 [[PTR]], v{{[0-9]+}}, v{{[0-9]+}} offset1:1
-; GCN-DAG: ds_write2_b32 [[PTR]], v{{[0-9]+}}, v{{[0-9]+}} offset0:2 offset1:3
+; CI-DAG: ds_write2_b32 [[PTR]], v{{[0-9]+}}, v{{[0-9]+}} offset1:1
+; CI-DAG: ds_write2_b32 [[PTR]], v{{[0-9]+}}, v{{[0-9]+}} offset0:2 offset1:3
+; GFX9-DAG: ds_write_b128 [[PTR]], {{v\[[0-9]+:[0-9]+\]}}
+
 ; GCN: s_endpgm
 define amdgpu_kernel void @store_misaligned64_constant_offsets() {
   store i64 123, i64 addrspace(3)* getelementptr inbounds ([4 x i64], [4 x i64] addrspace(3)* @bar, i32 0, i32 0), align 4
@@ -509,8 +511,9 @@ define amdgpu_kernel void @write2_sgemm_sequence(float addrspace(1)* %C, i32 %ld
 ; CI: s_mov_b32 m0
 ; GFX9-NOT: m0
 
-; GCN-DAG: ds_write2_b32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} offset0:2 offset1:3{{$}}
-; GCN-DAG: ds_write2_b32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} offset1:1{{$}}
+; CI: ds_write2_b32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} offset1:1{{$}}
+; CI: ds_write2_b32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} offset0:2 offset1:3{{$}}
+; GFX9: ds_write_b128 {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}
 define amdgpu_kernel void @simple_write2_v4f32_superreg_align4(<4 x float> addrspace(3)* %out, <4 x float> addrspace(1)* %in) #0 {
   %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
   %in.gep = getelementptr inbounds <4 x float>, <4 x float> addrspace(1)* %in

diff --git a/llvm/test/CodeGen/AMDGPU/insert-subvector-unused-scratch.ll b/llvm/test/CodeGen/AMDGPU/insert-subvector-unused-scratch.ll
index c4599ee2b353..6a4fcd86f39e 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-subvector-unused-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert-subvector-unused-scratch.ll
@@ -4,10 +4,8 @@
 ; an unused stack slot, causing ScratchSize to be non-zero.
 
 ; GCN-LABEL: store_v3i32:
-; GCN:        ds_read_b32
-; GCN:        ds_read_b64
-; GCN:        ds_write_b32
-; GCN:        ds_write_b64
+; GCN:        ds_read_b96
+; GCN:        ds_write_b96
 ; GCN: ScratchSize: 0
 define amdgpu_kernel void @store_v3i32(<3 x i32> addrspace(3)* %out, <3 x i32> %a) nounwind {
   %val = load <3 x i32>, <3 x i32> addrspace(3)* %out

diff --git a/llvm/test/CodeGen/AMDGPU/lds-misaligned-bug.ll b/llvm/test/CodeGen/AMDGPU/lds-misaligned-bug.ll
index c3975fbab779..975e2306cc32 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-misaligned-bug.ll
+++ b/llvm/test/CodeGen/AMDGPU/lds-misaligned-bug.ll
@@ -21,10 +21,8 @@ bb:
 }
 
 ; GCN-LABEL: test_local_misaligned_v4:
-; GCN-DAG: ds_read2_b32
-; GCN-DAG: ds_read2_b32
-; GCN-DAG: ds_write2_b32
-; GCN-DAG: ds_write2_b32
+; GCN-DAG: ds_read_b128
+; GCN-DAG: ds_write_b128
 define amdgpu_kernel void @test_local_misaligned_v4(i32 addrspace(3)* %arg) {
 bb:
   %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
@@ -44,10 +42,8 @@ bb:
 }
 
 ; GCN-LABEL: test_local_misaligned_v3:
-; GCN-DAG: ds_read2_b32
-; GCN-DAG: ds_read_b32
-; GCN-DAG: ds_write2_b32
-; GCN-DAG: ds_write_b32
+; GCN-DAG: ds_read_b96
+; GCN-DAG: ds_write_b96
 define amdgpu_kernel void @test_local_misaligned_v3(i32 addrspace(3)* %arg) {
 bb:
   %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
@@ -159,10 +155,8 @@ bb:
 }
 
 ; GCN-LABEL: test_local_aligned_v3:
-; GCN-DAG: ds_read_b64
-; GCN-DAG: ds_read_b32
-; GCN-DAG: ds_write_b64
-; GCN-DAG: ds_write_b32
+; GCN-DAG: ds_read_b96
+; GCN-DAG: ds_write_b96
 define amdgpu_kernel void @test_local_aligned_v3(i32 addrspace(3)* %arg) {
 bb:
   %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
@@ -218,8 +212,8 @@ bb:
 }
 
 ; GCN-LABEL: test_local_v4_aligned8:
-; GCN-DAG: ds_read2_b64
-; GCN-DAG: ds_write2_b64
+; GCN-DAG: ds_read_b128
+; GCN-DAG: ds_write_b128
 define amdgpu_kernel void @test_local_v4_aligned8(i32 addrspace(3)* %arg) {
 bb:
   %lid = tail call i32 @llvm.amdgcn.workitem.id.x()

diff --git a/llvm/test/CodeGen/AMDGPU/load-local-f32.ll b/llvm/test/CodeGen/AMDGPU/load-local-f32.ll
index a0559c17a161..e78406c92e7b 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-f32.ll
@@ -40,11 +40,13 @@ entry:
 ; SICIVI: s_mov_b32 m0
 ; GFX9-NOT: m0
 
-; GCN-DAG: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:8
-; GCN-DAG: ds_read_b64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+$}}
+; SI-DAG: ds_read_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:8
+; SI-DAG: ds_read_b64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+$}}
+; CIVI-DAG: ds_read_b96 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+$}}
 ; GCN: s_waitcnt
-; GCN-DAG: ds_write_b64
-; GCN-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:8{{$}}
+; SI-DAG: ds_write_b64
+; SI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:8{{$}}
+; CIVI-DAG: ds_write_b96 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}
 
 ; EG: LDS_READ_RET
 ; EG: LDS_READ_RET

diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
index 357141b36a85..343b37e6098e 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i16.ll
@@ -232,8 +232,10 @@ define amdgpu_kernel void @local_sextload_v2i16_to_v2i32(<2 x i32> addrspace(3)*
 ; SICIVI: s_mov_b32 m0
 
 ; GCN: ds_read_b64
-; GCN-DAG: ds_write_b32
-; GCN-DAG: ds_write_b64
+; SI-DAG: ds_write_b32
+; SI-DAG: ds_write_b64
+; CIVI-DAG: ds_write_b96
+; GFX9-DAG: ds_write_b96
 
 ; EG: LDS_USHORT_READ_RET
 ; EG: LDS_USHORT_READ_RET
@@ -251,8 +253,10 @@ entry:
 ; SICIVI: s_mov_b32 m0
 
 ; GCN: ds_read_b64
-; GCN-DAG: ds_write_b32
-; GCN-DAG: ds_write_b64
+; SI-DAG: ds_write_b32
+; SI-DAG: ds_write_b64
+; CIVI-DAG: ds_write_b96
+; GFX9-DAG: ds_write_b96
 
 ; EG: LDS_USHORT_READ_RET
 ; EG: LDS_USHORT_READ_RET

diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i32.ll b/llvm/test/CodeGen/AMDGPU/load-local-i32.ll
index 0063bb0b6d21..eca072eb8233 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i32.ll
@@ -39,8 +39,9 @@ entry:
 ; SICIVI: s_mov_b32 m0, -1
 ; GFX9-NOT: m0
 
-; GCN-DAG: ds_read_b64
-; GCN-DAG: ds_read_b32
+; SI-DAG: ds_read_b64
+; SI-DAG: ds_read_b32
+; CIVI-DAG: ds_read_b96
 define amdgpu_kernel void @local_load_v3i32(<3 x i32> addrspace(3)* %out, <3 x i32> addrspace(3)* %in) #0 {
 entry:
   %ld = load <3 x i32>, <3 x i32> addrspace(3)* %in

diff --git a/llvm/test/CodeGen/AMDGPU/load-local-i8.ll b/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
index 8137ded1454d..4a60ff79e206 100644
--- a/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-local-i8.ll
@@ -207,8 +207,10 @@ entry:
 ; GCN-DAG: v_bfe_i32
 ; GCN-DAG: v_bfe_i32
 
-; GCN-DAG: ds_write_b64
-; GCN-DAG: ds_write_b32
+; SI-DAG: ds_write_b64
+; SI-DAG: ds_write_b32
+; CIVI-DAG: ds_write_b96
+; GFX9-DAG: ds_write_b96
 
 ; EG: LDS_READ_RET
 ; EG-DAG: BFE_INT

diff --git a/llvm/test/CodeGen/AMDGPU/load-local.128.ll b/llvm/test/CodeGen/AMDGPU/load-local.128.ll
new file mode 100644
index 000000000000..e7f2ddb4fd1a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/load-local.128.ll
@@ -0,0 +1,378 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX6 %s
+
+define <4 x i32> @load_lds_v4i32(<4 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v4i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b128 v[0:3], v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v4i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_b128 v[0:3], v0
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v4i32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 8, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_b64 v[2:3], v1
+; GFX6-NEXT:    ds_read_b64 v[0:1], v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr
+  ret <4 x i32> %load
+}
+
+define <4 x i32> @load_lds_v4i32_align1(<4 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v4i32_align1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_u8 v1, v0
+; GFX9-NEXT:    ds_read_u8 v2, v0 offset:1
+; GFX9-NEXT:    ds_read_u8 v3, v0 offset:2
+; GFX9-NEXT:    ds_read_u8 v4, v0 offset:3
+; GFX9-NEXT:    ds_read_u8 v5, v0 offset:4
+; GFX9-NEXT:    ds_read_u8 v6, v0 offset:5
+; GFX9-NEXT:    ds_read_u8 v7, v0 offset:6
+; GFX9-NEXT:    ds_read_u8 v8, v0 offset:7
+; GFX9-NEXT:    ds_read_u8 v9, v0 offset:8
+; GFX9-NEXT:    ds_read_u8 v10, v0 offset:9
+; GFX9-NEXT:    ds_read_u8 v11, v0 offset:10
+; GFX9-NEXT:    ds_read_u8 v12, v0 offset:11
+; GFX9-NEXT:    ds_read_u8 v13, v0 offset:12
+; GFX9-NEXT:    ds_read_u8 v14, v0 offset:13
+; GFX9-NEXT:    ds_read_u8 v15, v0 offset:14
+; GFX9-NEXT:    ds_read_u8 v16, v0 offset:15
+; GFX9-NEXT:    s_waitcnt lgkmcnt(14)
+; GFX9-NEXT:    v_lshl_or_b32 v0, v2, 8, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(12)
+; GFX9-NEXT:    v_lshl_or_b32 v1, v4, 8, v3
+; GFX9-NEXT:    v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(10)
+; GFX9-NEXT:    v_lshl_or_b32 v1, v6, 8, v5
+; GFX9-NEXT:    s_waitcnt lgkmcnt(8)
+; GFX9-NEXT:    v_lshl_or_b32 v2, v8, 8, v7
+; GFX9-NEXT:    v_lshl_or_b32 v1, v2, 16, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(6)
+; GFX9-NEXT:    v_lshl_or_b32 v2, v10, 8, v9
+; GFX9-NEXT:    s_waitcnt lgkmcnt(4)
+; GFX9-NEXT:    v_lshl_or_b32 v3, v12, 8, v11
+; GFX9-NEXT:    v_lshl_or_b32 v2, v3, 16, v2
+; GFX9-NEXT:    s_waitcnt lgkmcnt(2)
+; GFX9-NEXT:    v_lshl_or_b32 v3, v14, 8, v13
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_lshl_or_b32 v4, v16, 8, v15
+; GFX9-NEXT:    v_lshl_or_b32 v3, v4, 16, v3
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v4i32_align1:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_u8 v1, v0 offset:7
+; GFX7-NEXT:    ds_read_u8 v2, v0 offset:6
+; GFX7-NEXT:    ds_read_u8 v3, v0 offset:5
+; GFX7-NEXT:    ds_read_u8 v5, v0 offset:4
+; GFX7-NEXT:    ds_read_u8 v4, v0 offset:3
+; GFX7-NEXT:    ds_read_u8 v6, v0 offset:2
+; GFX7-NEXT:    ds_read_u8 v7, v0 offset:1
+; GFX7-NEXT:    ds_read_u8 v8, v0
+; GFX7-NEXT:    s_waitcnt lgkmcnt(7)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(3)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v4, 8, v4
+; GFX7-NEXT:    s_waitcnt lgkmcnt(2)
+; GFX7-NEXT:    v_or_b32_e32 v4, v4, v6
+; GFX7-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v7, 8, v7
+; GFX7-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v7, v7, v8
+; GFX7-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v5
+; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT:    v_or_b32_e32 v4, v4, v7
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX7-NEXT:    ds_read_u8 v3, v0 offset:15
+; GFX7-NEXT:    ds_read_u8 v5, v0 offset:14
+; GFX7-NEXT:    ds_read_u8 v6, v0 offset:13
+; GFX7-NEXT:    ds_read_u8 v7, v0 offset:12
+; GFX7-NEXT:    ds_read_u8 v2, v0 offset:11
+; GFX7-NEXT:    ds_read_u8 v8, v0 offset:10
+; GFX7-NEXT:    ds_read_u8 v9, v0 offset:9
+; GFX7-NEXT:    ds_read_u8 v0, v0 offset:8
+; GFX7-NEXT:    s_waitcnt lgkmcnt(7)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
+; GFX7-NEXT:    s_waitcnt lgkmcnt(3)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(2)
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v8
+; GFX7-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v9, 8, v9
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v9, v0
+; GFX7-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v5
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v0
+; GFX7-NEXT:    v_lshlrev_b32_e32 v0, 8, v6
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v7
+; GFX7-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v0
+; GFX7-NEXT:    v_mov_b32_e32 v0, v4
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v4i32_align1:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 5, v0
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 4, v0
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 7, v0
+; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 6, v0
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 9, v0
+; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 8, v0
+; GFX6-NEXT:    v_add_i32_e32 v7, vcc, 11, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_u8 v2, v2
+; GFX6-NEXT:    ds_read_u8 v3, v3
+; GFX6-NEXT:    ds_read_u8 v4, v4
+; GFX6-NEXT:    ds_read_u8 v5, v5
+; GFX6-NEXT:    ds_read_u8 v6, v6
+; GFX6-NEXT:    ds_read_u8 v7, v7
+; GFX6-NEXT:    ds_read_u8 v1, v1
+; GFX6-NEXT:    ds_read_u8 v8, v0
+; GFX6-NEXT:    v_add_i32_e32 v9, vcc, 14, v0
+; GFX6-NEXT:    v_add_i32_e32 v10, vcc, 3, v0
+; GFX6-NEXT:    v_add_i32_e32 v11, vcc, 2, v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 8, v3
+; GFX6-NEXT:    v_or_b32_e32 v2, v2, v4
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 8, v5
+; GFX6-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 8, v7
+; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 10, v0
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 13, v0
+; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 12, v0
+; GFX6-NEXT:    v_add_i32_e32 v7, vcc, 15, v0
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
+; GFX6-NEXT:    ds_read_u8 v4, v4
+; GFX6-NEXT:    ds_read_u8 v5, v5
+; GFX6-NEXT:    ds_read_u8 v6, v6
+; GFX6-NEXT:    ds_read_u8 v7, v7
+; GFX6-NEXT:    ds_read_u8 v9, v9
+; GFX6-NEXT:    ds_read_u8 v10, v10
+; GFX6-NEXT:    ds_read_u8 v11, v11
+; GFX6-NEXT:    ds_read_u8 v0, v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(7)
+; GFX6-NEXT:    v_or_b32_e32 v3, v3, v4
+; GFX6-NEXT:    s_waitcnt lgkmcnt(4)
+; GFX6-NEXT:    v_lshlrev_b32_e32 v4, 8, v7
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT:    s_waitcnt lgkmcnt(3)
+; GFX6-NEXT:    v_or_b32_e32 v4, v4, v9
+; GFX6-NEXT:    v_or_b32_e32 v2, v3, v2
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 8, v5
+; GFX6-NEXT:    v_or_b32_e32 v3, v3, v6
+; GFX6-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT:    v_or_b32_e32 v3, v4, v3
+; GFX6-NEXT:    s_waitcnt lgkmcnt(2)
+; GFX6-NEXT:    v_lshlrev_b32_e32 v4, 8, v10
+; GFX6-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX6-NEXT:    v_or_b32_e32 v4, v4, v11
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX6-NEXT:    v_or_b32_e32 v0, v4, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 1
+  ret <4 x i32> %load
+}
+
+define <4 x i32> @load_lds_v4i32_align2(<4 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v4i32_align2:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_u16 v1, v0
+; GFX9-NEXT:    ds_read_u16 v2, v0 offset:2
+; GFX9-NEXT:    ds_read_u16 v3, v0 offset:4
+; GFX9-NEXT:    ds_read_u16 v4, v0 offset:6
+; GFX9-NEXT:    ds_read_u16 v5, v0 offset:8
+; GFX9-NEXT:    ds_read_u16 v6, v0 offset:10
+; GFX9-NEXT:    ds_read_u16 v7, v0 offset:12
+; GFX9-NEXT:    ds_read_u16 v8, v0 offset:14
+; GFX9-NEXT:    s_waitcnt lgkmcnt(6)
+; GFX9-NEXT:    v_lshl_or_b32 v0, v2, 16, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(4)
+; GFX9-NEXT:    v_lshl_or_b32 v1, v4, 16, v3
+; GFX9-NEXT:    s_waitcnt lgkmcnt(2)
+; GFX9-NEXT:    v_lshl_or_b32 v2, v6, 16, v5
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_lshl_or_b32 v3, v8, 16, v7
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v4i32_align2:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_u16 v3, v0 offset:14
+; GFX7-NEXT:    ds_read_u16 v4, v0 offset:12
+; GFX7-NEXT:    ds_read_u16 v2, v0 offset:10
+; GFX7-NEXT:    ds_read_u16 v5, v0 offset:8
+; GFX7-NEXT:    ds_read_u16 v1, v0 offset:6
+; GFX7-NEXT:    ds_read_u16 v6, v0 offset:4
+; GFX7-NEXT:    ds_read_u16 v7, v0 offset:2
+; GFX7-NEXT:    ds_read_u16 v0, v0
+; GFX7-NEXT:    s_waitcnt lgkmcnt(5)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(3)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v7, 16, v7
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v7, v0
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v6
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v5
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v4
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v4i32_align2:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 6, v0
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 4, v0
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 10, v0
+; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 8, v0
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 14, v0
+; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 12, v0
+; GFX6-NEXT:    v_add_i32_e32 v7, vcc, 2, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_u16 v2, v2
+; GFX6-NEXT:    ds_read_u16 v3, v3
+; GFX6-NEXT:    ds_read_u16 v4, v4
+; GFX6-NEXT:    ds_read_u16 v5, v5
+; GFX6-NEXT:    ds_read_u16 v6, v6
+; GFX6-NEXT:    ds_read_u16 v7, v7
+; GFX6-NEXT:    ds_read_u16 v1, v1
+; GFX6-NEXT:    ds_read_u16 v0, v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
+; GFX6-NEXT:    v_or_b32_e32 v2, v2, v4
+; GFX6-NEXT:    v_lshlrev_b32_e32 v4, 16, v7
+; GFX6-NEXT:    v_or_b32_e32 v3, v3, v6
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_or_b32_e32 v0, v4, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 2
+  ret <4 x i32> %load
+}
+
+define <4 x i32> @load_lds_v4i32_align4(<4 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v4i32_align4:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b128 v[0:3], v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v4i32_align4:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v2, v0
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read2_b32 v[0:1], v0 offset1:1
+; GFX7-NEXT:    ds_read2_b32 v[2:3], v2 offset0:2 offset1:3
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v4i32_align4:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 4, v0
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 8, v0
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 12, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_b32 v2, v2
+; GFX6-NEXT:    ds_read_b32 v3, v3
+; GFX6-NEXT:    ds_read_b32 v1, v1
+; GFX6-NEXT:    ds_read_b32 v0, v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 4
+  ret <4 x i32> %load
+}
+
+define <4 x i32> @load_lds_v4i32_align8(<4 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v4i32_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b128 v[0:3], v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v4i32_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read2_b64 v[0:3], v0 offset1:1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v4i32_align8:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 8, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_b64 v[2:3], v1
+; GFX6-NEXT:    ds_read_b64 v[0:1], v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 8
+  ret <4 x i32> %load
+}
+
+define <4 x i32> @load_lds_v4i32_align16(<4 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v4i32_align16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b128 v[0:3], v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v4i32_align16:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_b128 v[0:3], v0
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v4i32_align16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 8, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_b64 v[2:3], v1
+; GFX6-NEXT:    ds_read_b64 v[0:1], v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 16
+  ret <4 x i32> %load
+}

diff --git a/llvm/test/CodeGen/AMDGPU/load-local.96.ll b/llvm/test/CodeGen/AMDGPU/load-local.96.ll
new file mode 100644
index 000000000000..d210d4edeba1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/load-local.96.ll
@@ -0,0 +1,331 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX6 %s
+
+define <3 x i32> @load_lds_v3i32(<3 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v3i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b96 v[0:2], v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v3i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_b96 v[0:2], v0
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v3i32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 8, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_b32 v2, v1
+; GFX6-NEXT:    ds_read_b64 v[0:1], v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr
+  ret <3 x i32> %load
+}
+
+define <3 x i32> @load_lds_v3i32_align1(<3 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v3i32_align1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_u8 v1, v0
+; GFX9-NEXT:    ds_read_u8 v2, v0 offset:1
+; GFX9-NEXT:    ds_read_u8 v3, v0 offset:2
+; GFX9-NEXT:    ds_read_u8 v4, v0 offset:3
+; GFX9-NEXT:    ds_read_u8 v5, v0 offset:4
+; GFX9-NEXT:    ds_read_u8 v6, v0 offset:5
+; GFX9-NEXT:    ds_read_u8 v7, v0 offset:6
+; GFX9-NEXT:    ds_read_u8 v8, v0 offset:7
+; GFX9-NEXT:    ds_read_u8 v9, v0 offset:8
+; GFX9-NEXT:    ds_read_u8 v10, v0 offset:9
+; GFX9-NEXT:    ds_read_u8 v11, v0 offset:10
+; GFX9-NEXT:    ds_read_u8 v12, v0 offset:11
+; GFX9-NEXT:    s_waitcnt lgkmcnt(10)
+; GFX9-NEXT:    v_lshl_or_b32 v0, v2, 8, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(8)
+; GFX9-NEXT:    v_lshl_or_b32 v1, v4, 8, v3
+; GFX9-NEXT:    v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(6)
+; GFX9-NEXT:    v_lshl_or_b32 v1, v6, 8, v5
+; GFX9-NEXT:    s_waitcnt lgkmcnt(4)
+; GFX9-NEXT:    v_lshl_or_b32 v2, v8, 8, v7
+; GFX9-NEXT:    v_lshl_or_b32 v1, v2, 16, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(2)
+; GFX9-NEXT:    v_lshl_or_b32 v2, v10, 8, v9
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_lshl_or_b32 v3, v12, 8, v11
+; GFX9-NEXT:    v_lshl_or_b32 v2, v3, 16, v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v3i32_align1:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_u8 v1, v0 offset:7
+; GFX7-NEXT:    ds_read_u8 v2, v0 offset:6
+; GFX7-NEXT:    ds_read_u8 v4, v0 offset:5
+; GFX7-NEXT:    ds_read_u8 v5, v0 offset:4
+; GFX7-NEXT:    ds_read_u8 v3, v0 offset:3
+; GFX7-NEXT:    ds_read_u8 v6, v0 offset:2
+; GFX7-NEXT:    ds_read_u8 v7, v0 offset:1
+; GFX7-NEXT:    ds_read_u8 v8, v0
+; GFX7-NEXT:    s_waitcnt lgkmcnt(7)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(5)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v4, 8, v4
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(4)
+; GFX7-NEXT:    v_or_b32_e32 v4, v4, v5
+; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v4
+; GFX7-NEXT:    ds_read_u8 v2, v0 offset:11
+; GFX7-NEXT:    ds_read_u8 v4, v0 offset:10
+; GFX7-NEXT:    ds_read_u8 v5, v0 offset:9
+; GFX7-NEXT:    ds_read_u8 v0, v0 offset:8
+; GFX7-NEXT:    s_waitcnt lgkmcnt(7)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
+; GFX7-NEXT:    s_waitcnt lgkmcnt(5)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v7, 8, v7
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v6
+; GFX7-NEXT:    s_waitcnt lgkmcnt(3)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(2)
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v4
+; GFX7-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NEXT:    v_or_b32_e32 v7, v7, v8
+; GFX7-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT:    v_or_b32_e32 v3, v3, v7
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v5, v0
+; GFX7-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v0
+; GFX7-NEXT:    v_mov_b32_e32 v0, v3
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v3i32_align1:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 5, v0
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 4, v0
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 7, v0
+; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 6, v0
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 9, v0
+; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 8, v0
+; GFX6-NEXT:    v_add_i32_e32 v7, vcc, 11, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_u8 v2, v2
+; GFX6-NEXT:    ds_read_u8 v3, v3
+; GFX6-NEXT:    ds_read_u8 v4, v4
+; GFX6-NEXT:    ds_read_u8 v5, v5
+; GFX6-NEXT:    ds_read_u8 v6, v6
+; GFX6-NEXT:    ds_read_u8 v7, v7
+; GFX6-NEXT:    ds_read_u8 v1, v1
+; GFX6-NEXT:    ds_read_u8 v8, v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 8, v3
+; GFX6-NEXT:    v_or_b32_e32 v2, v2, v4
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT:    v_or_b32_e32 v1, v2, v1
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 8, v5
+; GFX6-NEXT:    v_or_b32_e32 v2, v2, v6
+; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 10, v0
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 3, v0
+; GFX6-NEXT:    v_add_i32_e32 v6, vcc, 2, v0
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
+; GFX6-NEXT:    ds_read_u8 v4, v4
+; GFX6-NEXT:    ds_read_u8 v5, v5
+; GFX6-NEXT:    ds_read_u8 v6, v6
+; GFX6-NEXT:    ds_read_u8 v0, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 8, v7
+; GFX6-NEXT:    s_waitcnt lgkmcnt(3)
+; GFX6-NEXT:    v_or_b32_e32 v3, v3, v4
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT:    v_or_b32_e32 v2, v3, v2
+; GFX6-NEXT:    s_waitcnt lgkmcnt(2)
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 8, v5
+; GFX6-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX6-NEXT:    v_or_b32_e32 v3, v3, v6
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT:    v_or_b32_e32 v0, v0, v8
+; GFX6-NEXT:    v_or_b32_e32 v0, v3, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 1
+  ret <3 x i32> %load
+}
+
+define <3 x i32> @load_lds_v3i32_align2(<3 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v3i32_align2:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_u16 v1, v0
+; GFX9-NEXT:    ds_read_u16 v2, v0 offset:2
+; GFX9-NEXT:    ds_read_u16 v3, v0 offset:4
+; GFX9-NEXT:    ds_read_u16 v4, v0 offset:6
+; GFX9-NEXT:    ds_read_u16 v5, v0 offset:8
+; GFX9-NEXT:    ds_read_u16 v6, v0 offset:10
+; GFX9-NEXT:    s_waitcnt lgkmcnt(4)
+; GFX9-NEXT:    v_lshl_or_b32 v0, v2, 16, v1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(2)
+; GFX9-NEXT:    v_lshl_or_b32 v1, v4, 16, v3
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_lshl_or_b32 v2, v6, 16, v5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v3i32_align2:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_u16 v2, v0 offset:10
+; GFX7-NEXT:    ds_read_u16 v3, v0 offset:8
+; GFX7-NEXT:    ds_read_u16 v1, v0 offset:6
+; GFX7-NEXT:    ds_read_u16 v4, v0 offset:4
+; GFX7-NEXT:    ds_read_u16 v5, v0 offset:2
+; GFX7-NEXT:    ds_read_u16 v0, v0
+; GFX7-NEXT:    s_waitcnt lgkmcnt(5)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(3)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(2)
+; GFX7-NEXT:    v_or_b32_e32 v1, v1, v4
+; GFX7-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX7-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_or_b32_e32 v0, v5, v0
+; GFX7-NEXT:    v_or_b32_e32 v2, v2, v3
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v3i32_align2:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 6, v0
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 4, v0
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, 10, v0
+; GFX6-NEXT:    v_add_i32_e32 v4, vcc, 8, v0
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_u16 v2, v2
+; GFX6-NEXT:    ds_read_u16 v3, v3
+; GFX6-NEXT:    ds_read_u16 v4, v4
+; GFX6-NEXT:    ds_read_u16 v5, v5
+; GFX6-NEXT:    ds_read_u16 v1, v1
+; GFX6-NEXT:    ds_read_u16 v0, v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(1)
+; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
+; GFX6-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
+; GFX6-NEXT:    v_or_b32_e32 v2, v2, v4
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_or_b32_e32 v0, v3, v0
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 2
+  ret <3 x i32> %load
+}
+
+define <3 x i32> @load_lds_v3i32_align4(<3 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v3i32_align4:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b96 v[0:2], v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v3i32_align4:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v2, v0
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read2_b32 v[0:1], v0 offset1:1
+; GFX7-NEXT:    ds_read_b32 v2, v2 offset:8
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v3i32_align4:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 4, v0
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 8, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_b32 v2, v2
+; GFX6-NEXT:    ds_read_b32 v1, v1
+; GFX6-NEXT:    ds_read_b32 v0, v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 4
+  ret <3 x i32> %load
+}
+
+define <3 x i32> @load_lds_v3i32_align8(<3 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v3i32_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b96 v[0:2], v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v3i32_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v2, v0
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read2_b32 v[0:1], v0 offset1:1
+; GFX7-NEXT:    ds_read_b32 v2, v2 offset:8
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v3i32_align8:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 4, v0
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 8, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_b32 v2, v2
+; GFX6-NEXT:    ds_read_b32 v1, v1
+; GFX6-NEXT:    ds_read_b32 v0, v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 8
+  ret <3 x i32> %load
+}
+
+define <3 x i32> @load_lds_v3i32_align16(<3 x i32> addrspace(3)* %ptr) {
+; GFX9-LABEL: load_lds_v3i32_align16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    ds_read_b96 v[0:2], v0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: load_lds_v3i32_align16:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    ds_read_b96 v[0:2], v0
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX6-LABEL: load_lds_v3i32_align16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT:    v_add_i32_e32 v1, vcc, 8, v0
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    ds_read_b32 v2, v1
+; GFX6-NEXT:    ds_read_b64 v[0:1], v0
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 16
+  ret <3 x i32> %load
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/store-local.128.ll b/llvm/test/CodeGen/AMDGPU/store-local.128.ll
new file mode 100644
index 000000000000..3fa202768f48
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/store-local.128.ll
@@ -0,0 +1,422 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX6 %s
+
+define amdgpu_kernel void @store_lds_v4i32(<4 x i32> addrspace(3)* %out, <4 x i32> %x) {
+; GFX9-LABEL: store_lds_v4i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s3
+; GFX9-NEXT:    ds_write_b128 v4, v[0:3]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v4i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v4, s4
+; GFX7-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    v_mov_b32_e32 v2, s2
+; GFX7-NEXT:    v_mov_b32_e32 v3, s3
+; GFX7-NEXT:    ds_write_b128 v4, v[0:3]
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v4i32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v4, s4
+; GFX6-NEXT:    v_mov_b32_e32 v0, s2
+; GFX6-NEXT:    v_mov_b32_e32 v1, s3
+; GFX6-NEXT:    v_mov_b32_e32 v2, s0
+; GFX6-NEXT:    v_mov_b32_e32 v3, s1
+; GFX6-NEXT:    ds_write2_b64 v4, v[2:3], v[0:1] offset1:1
+; GFX6-NEXT:    s_endpgm
+  store <4 x i32> %x, <4 x i32> addrspace(3)* %out
+  ret void
+}
+
+define amdgpu_kernel void @store_lds_v4i32_align1(<4 x i32> addrspace(3)* %out, <4 x i32> %x) {
+; GFX9-LABEL: store_lds_v4i32_align1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    s_lshr_b32 s4, s2, 8
+; GFX9-NEXT:    s_lshr_b32 s2, s2, 24
+; GFX9-NEXT:    v_mov_b32_e32 v4, s2
+; GFX9-NEXT:    s_lshr_b32 s2, s3, 8
+; GFX9-NEXT:    v_mov_b32_e32 v6, s0
+; GFX9-NEXT:    ds_write_b8 v0, v2 offset:8
+; GFX9-NEXT:    ds_write_b8_d16_hi v0, v2 offset:10
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    s_lshr_b32 s2, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX9-NEXT:    v_mov_b32_e32 v8, s0
+; GFX9-NEXT:    s_lshr_b32 s0, s1, 8
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    v_mov_b32_e32 v5, s1
+; GFX9-NEXT:    ds_write_b8 v0, v6
+; GFX9-NEXT:    ds_write_b8_d16_hi v0, v6 offset:2
+; GFX9-NEXT:    v_mov_b32_e32 v6, s0
+; GFX9-NEXT:    s_lshr_b32 s0, s3, 24
+; GFX9-NEXT:    ds_write_b8 v0, v1 offset:12
+; GFX9-NEXT:    ds_write_b8 v0, v5 offset:4
+; GFX9-NEXT:    ds_write_b8 v0, v2 offset:13
+; GFX9-NEXT:    v_mov_b32_e32 v2, s0
+; GFX9-NEXT:    s_lshr_b32 s0, s1, 24
+; GFX9-NEXT:    v_mov_b32_e32 v3, s4
+; GFX9-NEXT:    ds_write_b8_d16_hi v0, v1 offset:14
+; GFX9-NEXT:    ds_write_b8 v0, v2 offset:15
+; GFX9-NEXT:    ds_write_b8 v0, v3 offset:9
+; GFX9-NEXT:    ds_write_b8 v0, v4 offset:11
+; GFX9-NEXT:    ds_write_b8 v0, v6 offset:5
+; GFX9-NEXT:    v_mov_b32_e32 v7, s2
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    ds_write_b8_d16_hi v0, v5 offset:6
+; GFX9-NEXT:    ds_write_b8 v0, v1 offset:7
+; GFX9-NEXT:    ds_write_b8 v0, v7 offset:1
+; GFX9-NEXT:    ds_write_b8 v0, v8 offset:3
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v4i32_align1:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-NEXT:    s_lshr_b32 s4, s3, 8
+; GFX7-NEXT:    v_mov_b32_e32 v5, s4
+; GFX7-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX7-NEXT:    v_mov_b32_e32 v1, s3
+; GFX7-NEXT:    s_lshr_b32 s3, s3, 24
+; GFX7-NEXT:    ds_write_b8 v0, v5 offset:13
+; GFX7-NEXT:    v_mov_b32_e32 v5, s3
+; GFX7-NEXT:    s_lshr_b32 s3, s2, 8
+; GFX7-NEXT:    v_mov_b32_e32 v6, s4
+; GFX7-NEXT:    ds_write_b8 v0, v5 offset:15
+; GFX7-NEXT:    ds_write_b8 v0, v6 offset:14
+; GFX7-NEXT:    v_mov_b32_e32 v5, s3
+; GFX7-NEXT:    v_mov_b32_e32 v2, s2
+; GFX7-NEXT:    s_lshr_b32 s3, s2, 16
+; GFX7-NEXT:    s_lshr_b32 s2, s2, 24
+; GFX7-NEXT:    ds_write_b8 v0, v2 offset:8
+; GFX7-NEXT:    ds_write_b8 v0, v5 offset:9
+; GFX7-NEXT:    v_mov_b32_e32 v2, s2
+; GFX7-NEXT:    s_lshr_b32 s2, s1, 8
+; GFX7-NEXT:    v_mov_b32_e32 v6, s3
+; GFX7-NEXT:    ds_write_b8 v0, v1 offset:12
+; GFX7-NEXT:    ds_write_b8 v0, v2 offset:11
+; GFX7-NEXT:    ds_write_b8 v0, v6 offset:10
+; GFX7-NEXT:    v_mov_b32_e32 v1, s2
+; GFX7-NEXT:    s_lshr_b32 s2, s1, 16
+; GFX7-NEXT:    v_mov_b32_e32 v3, s1
+; GFX7-NEXT:    s_lshr_b32 s1, s1, 24
+; GFX7-NEXT:    ds_write_b8 v0, v1 offset:5
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX7-NEXT:    v_mov_b32_e32 v2, s2
+; GFX7-NEXT:    ds_write_b8 v0, v1 offset:7
+; GFX7-NEXT:    ds_write_b8 v0, v2 offset:6
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    v_mov_b32_e32 v4, s0
+; GFX7-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX7-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX7-NEXT:    ds_write_b8 v0, v4
+; GFX7-NEXT:    ds_write_b8 v0, v1 offset:1
+; GFX7-NEXT:    v_mov_b32_e32 v2, s1
+; GFX7-NEXT:    v_mov_b32_e32 v1, s0
+; GFX7-NEXT:    ds_write_b8 v0, v3 offset:4
+; GFX7-NEXT:    ds_write_b8 v0, v1 offset:3
+; GFX7-NEXT:    ds_write_b8 v0, v2 offset:2
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v4i32_align1:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NEXT:    s_lshr_b32 s4, s3, 8
+; GFX6-NEXT:    v_mov_b32_e32 v5, s4
+; GFX6-NEXT:    s_lshr_b32 s4, s3, 16
+; GFX6-NEXT:    v_mov_b32_e32 v1, s3
+; GFX6-NEXT:    s_lshr_b32 s3, s3, 24
+; GFX6-NEXT:    ds_write_b8 v0, v5 offset:13
+; GFX6-NEXT:    v_mov_b32_e32 v5, s3
+; GFX6-NEXT:    s_lshr_b32 s3, s2, 8
+; GFX6-NEXT:    v_mov_b32_e32 v6, s4
+; GFX6-NEXT:    ds_write_b8 v0, v5 offset:15
+; GFX6-NEXT:    ds_write_b8 v0, v6 offset:14
+; GFX6-NEXT:    v_mov_b32_e32 v5, s3
+; GFX6-NEXT:    v_mov_b32_e32 v2, s2
+; GFX6-NEXT:    s_lshr_b32 s3, s2, 16
+; GFX6-NEXT:    s_lshr_b32 s2, s2, 24
+; GFX6-NEXT:    ds_write_b8 v0, v2 offset:8
+; GFX6-NEXT:    ds_write_b8 v0, v5 offset:9
+; GFX6-NEXT:    v_mov_b32_e32 v2, s2
+; GFX6-NEXT:    s_lshr_b32 s2, s1, 8
+; GFX6-NEXT:    v_mov_b32_e32 v6, s3
+; GFX6-NEXT:    ds_write_b8 v0, v1 offset:12
+; GFX6-NEXT:    ds_write_b8 v0, v2 offset:11
+; GFX6-NEXT:    ds_write_b8 v0, v6 offset:10
+; GFX6-NEXT:    v_mov_b32_e32 v1, s2
+; GFX6-NEXT:    s_lshr_b32 s2, s1, 16
+; GFX6-NEXT:    v_mov_b32_e32 v3, s1
+; GFX6-NEXT:    s_lshr_b32 s1, s1, 24
+; GFX6-NEXT:    ds_write_b8 v0, v1 offset:5
+; GFX6-NEXT:    v_mov_b32_e32 v1, s1
+; GFX6-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX6-NEXT:    v_mov_b32_e32 v2, s2
+; GFX6-NEXT:    ds_write_b8 v0, v1 offset:7
+; GFX6-NEXT:    ds_write_b8 v0, v2 offset:6
+; GFX6-NEXT:    v_mov_b32_e32 v1, s1
+; GFX6-NEXT:    v_mov_b32_e32 v4, s0
+; GFX6-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX6-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX6-NEXT:    ds_write_b8 v0, v4
+; GFX6-NEXT:    ds_write_b8 v0, v1 offset:1
+; GFX6-NEXT:    v_mov_b32_e32 v2, s1
+; GFX6-NEXT:    v_mov_b32_e32 v1, s0
+; GFX6-NEXT:    ds_write_b8 v0, v3 offset:4
+; GFX6-NEXT:    ds_write_b8 v0, v1 offset:3
+; GFX6-NEXT:    ds_write_b8 v0, v2 offset:2
+; GFX6-NEXT:    s_endpgm
+  store <4 x i32> %x, <4 x i32> addrspace(3)* %out, align 1
+  ret void
+}
+
+define amdgpu_kernel void @store_lds_v4i32_align2(<4 x i32> addrspace(3)* %out, <4 x i32> %x) {
+; GFX9-LABEL: store_lds_v4i32_align2:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    v_mov_b32_e32 v4, s0
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s1
+; GFX9-NEXT:    ds_write_b16_d16_hi v0, v1 offset:14
+; GFX9-NEXT:    ds_write_b16 v0, v4
+; GFX9-NEXT:    ds_write_b16 v0, v3 offset:4
+; GFX9-NEXT:    ds_write_b16 v0, v2 offset:8
+; GFX9-NEXT:    ds_write_b16 v0, v1 offset:12
+; GFX9-NEXT:    ds_write_b16_d16_hi v0, v2 offset:10
+; GFX9-NEXT:    ds_write_b16_d16_hi v0, v3 offset:6
+; GFX9-NEXT:    ds_write_b16_d16_hi v0, v4 offset:2
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v4i32_align2:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-NEXT:    v_mov_b32_e32 v4, s0
+; GFX7-NEXT:    s_lshr_b32 s0, s0, 16
+; GFX7-NEXT:    v_mov_b32_e32 v5, s0
+; GFX7-NEXT:    s_lshr_b32 s0, s1, 16
+; GFX7-NEXT:    v_mov_b32_e32 v3, s1
+; GFX7-NEXT:    ds_write_b16 v0, v4
+; GFX7-NEXT:    v_mov_b32_e32 v4, s0
+; GFX7-NEXT:    s_lshr_b32 s0, s2, 16
+; GFX7-NEXT:    v_mov_b32_e32 v2, s2
+; GFX7-NEXT:    ds_write_b16 v0, v3 offset:4
+; GFX7-NEXT:    v_mov_b32_e32 v3, s0
+; GFX7-NEXT:    s_lshr_b32 s0, s3, 16
+; GFX7-NEXT:    ds_write_b16 v0, v2 offset:8
+; GFX7-NEXT:    v_mov_b32_e32 v2, s0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s3
+; GFX7-NEXT:    ds_write_b16 v0, v2 offset:14
+; GFX7-NEXT:    ds_write_b16 v0, v1 offset:12
+; GFX7-NEXT:    ds_write_b16 v0, v3 offset:10
+; GFX7-NEXT:    ds_write_b16 v0, v4 offset:6
+; GFX7-NEXT:    ds_write_b16 v0, v5 offset:2
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v4i32_align2:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NEXT:    v_mov_b32_e32 v4, s0
+; GFX6-NEXT:    s_lshr_b32 s0, s0, 16
+; GFX6-NEXT:    v_mov_b32_e32 v5, s0
+; GFX6-NEXT:    s_lshr_b32 s0, s1, 16
+; GFX6-NEXT:    v_mov_b32_e32 v3, s1
+; GFX6-NEXT:    ds_write_b16 v0, v4
+; GFX6-NEXT:    v_mov_b32_e32 v4, s0
+; GFX6-NEXT:    s_lshr_b32 s0, s2, 16
+; GFX6-NEXT:    v_mov_b32_e32 v2, s2
+; GFX6-NEXT:    ds_write_b16 v0, v3 offset:4
+; GFX6-NEXT:    v_mov_b32_e32 v3, s0
+; GFX6-NEXT:    s_lshr_b32 s0, s3, 16
+; GFX6-NEXT:    ds_write_b16 v0, v2 offset:8
+; GFX6-NEXT:    v_mov_b32_e32 v2, s0
+; GFX6-NEXT:    v_mov_b32_e32 v1, s3
+; GFX6-NEXT:    ds_write_b16 v0, v2 offset:14
+; GFX6-NEXT:    ds_write_b16 v0, v1 offset:12
+; GFX6-NEXT:    ds_write_b16 v0, v3 offset:10
+; GFX6-NEXT:    ds_write_b16 v0, v4 offset:6
+; GFX6-NEXT:    ds_write_b16 v0, v5 offset:2
+; GFX6-NEXT:    s_endpgm
+  store <4 x i32> %x, <4 x i32> addrspace(3)* %out, align 2
+  ret void
+}
+
+define amdgpu_kernel void @store_lds_v4i32_align4(<4 x i32> addrspace(3)* %out, <4 x i32> %x) {
+; GFX9-LABEL: store_lds_v4i32_align4:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s3
+; GFX9-NEXT:    ds_write_b128 v4, v[0:3]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v4i32_align4:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-NEXT:    v_mov_b32_e32 v1, s0
+; GFX7-NEXT:    v_mov_b32_e32 v2, s1
+; GFX7-NEXT:    v_mov_b32_e32 v3, s2
+; GFX7-NEXT:    v_mov_b32_e32 v4, s3
+; GFX7-NEXT:    ds_write2_b32 v0, v1, v2 offset1:1
+; GFX7-NEXT:    ds_write2_b32 v0, v3, v4 offset0:2 offset1:3
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v4i32_align4:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NEXT:    v_mov_b32_e32 v1, s1
+; GFX6-NEXT:    v_mov_b32_e32 v2, s0
+; GFX6-NEXT:    ds_write2_b32 v0, v2, v1 offset1:1
+; GFX6-NEXT:    v_mov_b32_e32 v1, s3
+; GFX6-NEXT:    v_mov_b32_e32 v2, s2
+; GFX6-NEXT:    ds_write2_b32 v0, v2, v1 offset0:2 offset1:3
+; GFX6-NEXT:    s_endpgm
+  store <4 x i32> %x, <4 x i32> addrspace(3)* %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @store_lds_v4i32_align8(<4 x i32> addrspace(3)* %out, <4 x i32> %x) {
+; GFX9-LABEL: store_lds_v4i32_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s3
+; GFX9-NEXT:    ds_write_b128 v4, v[0:3]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v4i32_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v4, s4
+; GFX7-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-NEXT:    v_mov_b32_e32 v2, s2
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    v_mov_b32_e32 v3, s3
+; GFX7-NEXT:    ds_write2_b64 v4, v[0:1], v[2:3] offset1:1
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v4i32_align8:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v4, s4
+; GFX6-NEXT:    v_mov_b32_e32 v0, s2
+; GFX6-NEXT:    v_mov_b32_e32 v1, s3
+; GFX6-NEXT:    v_mov_b32_e32 v2, s0
+; GFX6-NEXT:    v_mov_b32_e32 v3, s1
+; GFX6-NEXT:    ds_write2_b64 v4, v[2:3], v[0:1] offset1:1
+; GFX6-NEXT:    s_endpgm
+  store <4 x i32> %x, <4 x i32> addrspace(3)* %out, align 8
+  ret void
+}
+
+define amdgpu_kernel void @store_lds_v4i32_align16(<4 x i32> addrspace(3)* %out, <4 x i32> %x) {
+; GFX9-LABEL: store_lds_v4i32_align16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v4, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s3
+; GFX9-NEXT:    ds_write_b128 v4, v[0:3]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v4i32_align16:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v4, s4
+; GFX7-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    v_mov_b32_e32 v2, s2
+; GFX7-NEXT:    v_mov_b32_e32 v3, s3
+; GFX7-NEXT:    ds_write_b128 v4, v[0:3]
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v4i32_align16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v4, s4
+; GFX6-NEXT:    v_mov_b32_e32 v0, s2
+; GFX6-NEXT:    v_mov_b32_e32 v1, s3
+; GFX6-NEXT:    v_mov_b32_e32 v2, s0
+; GFX6-NEXT:    v_mov_b32_e32 v3, s1
+; GFX6-NEXT:    ds_write2_b64 v4, v[2:3], v[0:1] offset1:1
+; GFX6-NEXT:    s_endpgm
+  store <4 x i32> %x, <4 x i32> addrspace(3)* %out, align 16
+  ret void
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/store-local.96.ll b/llvm/test/CodeGen/AMDGPU/store-local.96.ll
new file mode 100644
index 000000000000..351b632d0647
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/store-local.96.ll
@@ -0,0 +1,370 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX6 %s
+
+define amdgpu_kernel void @store_lds_v3i32(<3 x i32> addrspace(3)* %out, <3 x i32> %x) {
+; GFX9-LABEL: store_lds_v3i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    ds_write_b96 v3, v[0:2]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v3i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v3, s4
+; GFX7-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    v_mov_b32_e32 v2, s2
+; GFX7-NEXT:    ds_write_b96 v3, v[0:2]
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v3i32:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v2, s4
+; GFX6-NEXT:    v_mov_b32_e32 v3, s2
+; GFX6-NEXT:    v_mov_b32_e32 v0, s0
+; GFX6-NEXT:    v_mov_b32_e32 v1, s1
+; GFX6-NEXT:    ds_write_b32 v2, v3 offset:8
+; GFX6-NEXT:    ds_write_b64 v2, v[0:1]
+; GFX6-NEXT:    s_endpgm
+  store <3 x i32> %x, <3 x i32> addrspace(3)* %out
+  ret void
+}
+
+define amdgpu_kernel void @store_lds_v3i32_align1(<3 x i32> addrspace(3)* %out, <3 x i32> %x) {
+; GFX9-LABEL: store_lds_v3i32_align1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    s_lshr_b32 s3, s2, 8
+; GFX9-NEXT:    v_mov_b32_e32 v4, s0
+; GFX9-NEXT:    v_mov_b32_e32 v2, s3
+; GFX9-NEXT:    s_lshr_b32 s3, s0, 8
+; GFX9-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX9-NEXT:    v_mov_b32_e32 v6, s0
+; GFX9-NEXT:    s_lshr_b32 s0, s1, 8
+; GFX9-NEXT:    v_mov_b32_e32 v1, s2
+; GFX9-NEXT:    v_mov_b32_e32 v3, s1
+; GFX9-NEXT:    ds_write_b8 v0, v4
+; GFX9-NEXT:    ds_write_b8_d16_hi v0, v4 offset:2
+; GFX9-NEXT:    v_mov_b32_e32 v4, s0
+; GFX9-NEXT:    s_lshr_b32 s0, s2, 24
+; GFX9-NEXT:    ds_write_b8 v0, v1 offset:8
+; GFX9-NEXT:    ds_write_b8 v0, v3 offset:4
+; GFX9-NEXT:    ds_write_b8 v0, v2 offset:9
+; GFX9-NEXT:    v_mov_b32_e32 v2, s0
+; GFX9-NEXT:    s_lshr_b32 s0, s1, 24
+; GFX9-NEXT:    ds_write_b8_d16_hi v0, v1 offset:10
+; GFX9-NEXT:    ds_write_b8 v0, v2 offset:11
+; GFX9-NEXT:    ds_write_b8 v0, v4 offset:5
+; GFX9-NEXT:    v_mov_b32_e32 v5, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    ds_write_b8_d16_hi v0, v3 offset:6
+; GFX9-NEXT:    ds_write_b8 v0, v1 offset:7
+; GFX9-NEXT:    ds_write_b8 v0, v5 offset:1
+; GFX9-NEXT:    ds_write_b8 v0, v6 offset:3
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v3i32_align1:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-NEXT:    s_lshr_b32 s3, s2, 8
+; GFX7-NEXT:    v_mov_b32_e32 v4, s3
+; GFX7-NEXT:    v_mov_b32_e32 v1, s2
+; GFX7-NEXT:    s_lshr_b32 s3, s2, 16
+; GFX7-NEXT:    s_lshr_b32 s2, s2, 24
+; GFX7-NEXT:    ds_write_b8 v0, v1 offset:8
+; GFX7-NEXT:    ds_write_b8 v0, v4 offset:9
+; GFX7-NEXT:    v_mov_b32_e32 v1, s2
+; GFX7-NEXT:    s_lshr_b32 s2, s1, 8
+; GFX7-NEXT:    v_mov_b32_e32 v5, s3
+; GFX7-NEXT:    ds_write_b8 v0, v1 offset:11
+; GFX7-NEXT:    ds_write_b8 v0, v5 offset:10
+; GFX7-NEXT:    v_mov_b32_e32 v1, s2
+; GFX7-NEXT:    s_lshr_b32 s2, s1, 16
+; GFX7-NEXT:    v_mov_b32_e32 v2, s1
+; GFX7-NEXT:    s_lshr_b32 s1, s1, 24
+; GFX7-NEXT:    ds_write_b8 v0, v1 offset:5
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX7-NEXT:    v_mov_b32_e32 v4, s2
+; GFX7-NEXT:    ds_write_b8 v0, v1 offset:7
+; GFX7-NEXT:    ds_write_b8 v0, v4 offset:6
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    v_mov_b32_e32 v3, s0
+; GFX7-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX7-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX7-NEXT:    ds_write_b8 v0, v3
+; GFX7-NEXT:    ds_write_b8 v0, v1 offset:1
+; GFX7-NEXT:    v_mov_b32_e32 v4, s1
+; GFX7-NEXT:    v_mov_b32_e32 v1, s0
+; GFX7-NEXT:    ds_write_b8 v0, v2 offset:4
+; GFX7-NEXT:    ds_write_b8 v0, v1 offset:3
+; GFX7-NEXT:    ds_write_b8 v0, v4 offset:2
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v3i32_align1:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NEXT:    s_lshr_b32 s3, s2, 8
+; GFX6-NEXT:    v_mov_b32_e32 v4, s3
+; GFX6-NEXT:    v_mov_b32_e32 v1, s2
+; GFX6-NEXT:    s_lshr_b32 s3, s2, 16
+; GFX6-NEXT:    s_lshr_b32 s2, s2, 24
+; GFX6-NEXT:    ds_write_b8 v0, v1 offset:8
+; GFX6-NEXT:    ds_write_b8 v0, v4 offset:9
+; GFX6-NEXT:    v_mov_b32_e32 v1, s2
+; GFX6-NEXT:    s_lshr_b32 s2, s1, 8
+; GFX6-NEXT:    v_mov_b32_e32 v5, s3
+; GFX6-NEXT:    ds_write_b8 v0, v1 offset:11
+; GFX6-NEXT:    ds_write_b8 v0, v5 offset:10
+; GFX6-NEXT:    v_mov_b32_e32 v1, s2
+; GFX6-NEXT:    s_lshr_b32 s2, s1, 16
+; GFX6-NEXT:    v_mov_b32_e32 v2, s1
+; GFX6-NEXT:    s_lshr_b32 s1, s1, 24
+; GFX6-NEXT:    ds_write_b8 v0, v1 offset:5
+; GFX6-NEXT:    v_mov_b32_e32 v1, s1
+; GFX6-NEXT:    s_lshr_b32 s1, s0, 8
+; GFX6-NEXT:    v_mov_b32_e32 v4, s2
+; GFX6-NEXT:    ds_write_b8 v0, v1 offset:7
+; GFX6-NEXT:    ds_write_b8 v0, v4 offset:6
+; GFX6-NEXT:    v_mov_b32_e32 v1, s1
+; GFX6-NEXT:    v_mov_b32_e32 v3, s0
+; GFX6-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX6-NEXT:    s_lshr_b32 s0, s0, 24
+; GFX6-NEXT:    ds_write_b8 v0, v3
+; GFX6-NEXT:    ds_write_b8 v0, v1 offset:1
+; GFX6-NEXT:    v_mov_b32_e32 v4, s1
+; GFX6-NEXT:    v_mov_b32_e32 v1, s0
+; GFX6-NEXT:    ds_write_b8 v0, v2 offset:4
+; GFX6-NEXT:    ds_write_b8 v0, v1 offset:3
+; GFX6-NEXT:    ds_write_b8 v0, v4 offset:2
+; GFX6-NEXT:    s_endpgm
+  store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 1
+  ret void
+}
+
+define amdgpu_kernel void @store_lds_v3i32_align2(<3 x i32> addrspace(3)* %out, <3 x i32> %x) {
+; GFX9-LABEL: store_lds_v3i32_align2:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    v_mov_b32_e32 v1, s2
+; GFX9-NEXT:    v_mov_b32_e32 v2, s1
+; GFX9-NEXT:    v_mov_b32_e32 v3, s0
+; GFX9-NEXT:    ds_write_b16_d16_hi v0, v1 offset:10
+; GFX9-NEXT:    ds_write_b16 v0, v3
+; GFX9-NEXT:    ds_write_b16 v0, v2 offset:4
+; GFX9-NEXT:    ds_write_b16 v0, v1 offset:8
+; GFX9-NEXT:    ds_write_b16_d16_hi v0, v2 offset:6
+; GFX9-NEXT:    ds_write_b16_d16_hi v0, v3 offset:2
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v3i32_align2:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-NEXT:    v_mov_b32_e32 v3, s0
+; GFX7-NEXT:    s_lshr_b32 s0, s0, 16
+; GFX7-NEXT:    v_mov_b32_e32 v4, s0
+; GFX7-NEXT:    s_lshr_b32 s0, s1, 16
+; GFX7-NEXT:    v_mov_b32_e32 v2, s1
+; GFX7-NEXT:    ds_write_b16 v0, v3
+; GFX7-NEXT:    v_mov_b32_e32 v3, s0
+; GFX7-NEXT:    s_lshr_b32 s0, s2, 16
+; GFX7-NEXT:    ds_write_b16 v0, v2 offset:4
+; GFX7-NEXT:    v_mov_b32_e32 v2, s0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s2
+; GFX7-NEXT:    ds_write_b16 v0, v2 offset:10
+; GFX7-NEXT:    ds_write_b16 v0, v1 offset:8
+; GFX7-NEXT:    ds_write_b16 v0, v3 offset:6
+; GFX7-NEXT:    ds_write_b16 v0, v4 offset:2
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v3i32_align2:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NEXT:    v_mov_b32_e32 v3, s0
+; GFX6-NEXT:    s_lshr_b32 s0, s0, 16
+; GFX6-NEXT:    v_mov_b32_e32 v4, s0
+; GFX6-NEXT:    s_lshr_b32 s0, s1, 16
+; GFX6-NEXT:    v_mov_b32_e32 v2, s1
+; GFX6-NEXT:    ds_write_b16 v0, v3
+; GFX6-NEXT:    v_mov_b32_e32 v3, s0
+; GFX6-NEXT:    s_lshr_b32 s0, s2, 16
+; GFX6-NEXT:    ds_write_b16 v0, v2 offset:4
+; GFX6-NEXT:    v_mov_b32_e32 v2, s0
+; GFX6-NEXT:    v_mov_b32_e32 v1, s2
+; GFX6-NEXT:    ds_write_b16 v0, v2 offset:10
+; GFX6-NEXT:    ds_write_b16 v0, v1 offset:8
+; GFX6-NEXT:    ds_write_b16 v0, v3 offset:6
+; GFX6-NEXT:    ds_write_b16 v0, v4 offset:2
+; GFX6-NEXT:    s_endpgm
+  store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 2
+  ret void
+}
+
+define amdgpu_kernel void @store_lds_v3i32_align4(<3 x i32> addrspace(3)* %out, <3 x i32> %x) {
+; GFX9-LABEL: store_lds_v3i32_align4:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    ds_write_b96 v3, v[0:2]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v3i32_align4:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-NEXT:    v_mov_b32_e32 v1, s0
+; GFX7-NEXT:    v_mov_b32_e32 v2, s1
+; GFX7-NEXT:    v_mov_b32_e32 v3, s2
+; GFX7-NEXT:    ds_write2_b32 v0, v1, v2 offset1:1
+; GFX7-NEXT:    ds_write_b32 v0, v3 offset:8
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v3i32_align4:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NEXT:    v_mov_b32_e32 v1, s1
+; GFX6-NEXT:    v_mov_b32_e32 v2, s0
+; GFX6-NEXT:    ds_write2_b32 v0, v2, v1 offset1:1
+; GFX6-NEXT:    v_mov_b32_e32 v1, s2
+; GFX6-NEXT:    ds_write_b32 v0, v1 offset:8
+; GFX6-NEXT:    s_endpgm
+  store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @store_lds_v3i32_align8(<3 x i32> addrspace(3)* %out, <3 x i32> %x) {
+; GFX9-LABEL: store_lds_v3i32_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    ds_write_b96 v3, v[0:2]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v3i32_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v2, s4
+; GFX7-NEXT:    v_mov_b32_e32 v3, s2
+; GFX7-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    ds_write_b32 v2, v3 offset:8
+; GFX7-NEXT:    ds_write_b64 v2, v[0:1]
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v3i32_align8:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v2, s4
+; GFX6-NEXT:    v_mov_b32_e32 v3, s2
+; GFX6-NEXT:    v_mov_b32_e32 v0, s0
+; GFX6-NEXT:    v_mov_b32_e32 v1, s1
+; GFX6-NEXT:    ds_write_b32 v2, v3 offset:8
+; GFX6-NEXT:    ds_write_b64 v2, v[0:1]
+; GFX6-NEXT:    s_endpgm
+  store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 8
+  ret void
+}
+
+define amdgpu_kernel void @store_lds_v3i32_align16(<3 x i32> addrspace(3)* %out, <3 x i32> %x) {
+; GFX9-LABEL: store_lds_v3i32_align16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dword s4, s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x34
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v3, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    ds_write_b96 v3, v[0:2]
+; GFX9-NEXT:    s_endpgm
+;
+; GFX7-LABEL: store_lds_v3i32_align16:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX7-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX7-NEXT:    s_mov_b32 m0, -1
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v3, s4
+; GFX7-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    v_mov_b32_e32 v2, s2
+; GFX7-NEXT:    ds_write_b96 v3, v[0:2]
+; GFX7-NEXT:    s_endpgm
+;
+; GFX6-LABEL: store_lds_v3i32_align16:
+; GFX6:       ; %bb.0:
+; GFX6-NEXT:    s_load_dword s4, s[0:1], 0x9
+; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xd
+; GFX6-NEXT:    s_mov_b32 m0, -1
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    v_mov_b32_e32 v2, s4
+; GFX6-NEXT:    v_mov_b32_e32 v3, s2
+; GFX6-NEXT:    v_mov_b32_e32 v0, s0
+; GFX6-NEXT:    v_mov_b32_e32 v1, s1
+; GFX6-NEXT:    ds_write_b32 v2, v3 offset:8
+; GFX6-NEXT:    ds_write_b64 v2, v[0:1]
+; GFX6-NEXT:    s_endpgm
+  store <3 x i32> %x, <3 x i32> addrspace(3)* %out, align 16
+  ret void
+}

diff --git a/llvm/test/CodeGen/AMDGPU/store-local.ll b/llvm/test/CodeGen/AMDGPU/store-local.ll
index f302ea099b75..8984e22bed5b 100644
--- a/llvm/test/CodeGen/AMDGPU/store-local.ll
+++ b/llvm/test/CodeGen/AMDGPU/store-local.ll
@@ -179,8 +179,9 @@ entry:
 ; CM: LDS_WRITE
 ; CM: LDS_WRITE
 
-; GCN: ds_write2_b32
-; GCN: ds_write2_b32
+; SICIVI: ds_write2_b32
+; SICIVI: ds_write2_b32
+; GFX9: ds_write_b128
 define amdgpu_kernel void @store_local_v4i32_align4(<4 x i32> addrspace(3)* %out, <4 x i32> %in) {
 entry:
   store <4 x i32> %in, <4 x i32> addrspace(3)* %out, align 4


        


More information about the llvm-commits mailing list