[llvm] 86f9117 - AMDGPU: Don't report 2-byte alignment as fast

Eli Friedman via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 11 15:46:59 PST 2020


I don't understand how two-byte alignment could possibly be *worse* than one-byte alignment.  You can always just pretend the specified alignment was 1 instead of 2.

Maybe you meant to check the size of the access?
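
For illustration, a size-aware variant of the check might look like the
sketch below. This is hypothetical, not the committed code; it assumes
the Size parameter of allowsMisalignedMemoryAccessesImpl is in bits, and
it differs from the patch only in keeping a naturally aligned 2-byte
access fast:

      // Hypothetical: a 2-byte access at 2-byte alignment is natural,
      // so only penalize 2-byte alignment for wider accesses.
      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
                 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
        Align >= 4 : (Align != 2 || Size == 16);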

-Eli
________________________________
From: llvm-commits <llvm-commits-bounces at lists.llvm.org> on behalf of Matt Arsenault via llvm-commits <llvm-commits at lists.llvm.org>
Sent: Tuesday, February 11, 2020 3:35 PM
To: llvm-commits at lists.llvm.org <llvm-commits at lists.llvm.org>
Subject: [EXT] [llvm] 86f9117 - AMDGPU: Don't report 2-byte alignment as fast


Author: Matt Arsenault
Date: 2020-02-11T18:35:00-05:00
New Revision: 86f9117d476bcef2f5e0eabae4781e99877ce7b5

URL: https://github.com/llvm/llvm-project/commit/86f9117d476bcef2f5e0eabae4781e99877ce7b5
DIFF: https://github.com/llvm/llvm-project/commit/86f9117d476bcef2f5e0eabae4781e99877ce7b5.diff

LOG: AMDGPU: Don't report 2-byte alignment as fast

This is apparently worse than 1-byte alignment. This patch does not
attempt to decompose existing 2-byte-aligned wide stores, but stops
trying to produce them.

Also fix a bug in the LoadStoreVectorizer which was decreasing the
alignment and vectorizing stack accesses. It assumed a stack object was
an alloca whose base alignment could be changed, which is not true if
the pointer is derived from a function argument.
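
A minimal IR sketch of the problematic pattern (mirroring the
adjust-alloca-alignment.ll tests added below): the stores go through an
incoming private pointer rather than an alloca, so the base alignment
cannot be raised and the chain must not be vectorized at align 2:

    define void @private_store_2xi16_not_alloca(i16 addrspace(5)* %r) {
      %gep.r = getelementptr i16, i16 addrspace(5)* %r, i32 1
      store i16 1, i16 addrspace(5)* %r, align 2
      store i16 2, i16 addrspace(5)* %gep.r, align 2
      ret void
    }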

Added:
    llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.global.ll
    llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll

Modified:
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
    llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
    llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll

Removed:



################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b6966e66c36b..55003521b8b2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1251,9 +1251,11 @@ bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
     // If we have a uniform constant load, it still requires using a slow
     // buffer instruction if unaligned.
     if (IsFast) {
+      // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so
+      // 2-byte alignment is worse than 1 unless doing a 2-byte access.
       *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
                  AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ?
-        (Align % 4 == 0) : true;
+        Align >= 4 : Align != 2;
     }

     return true;

diff  --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 3b22f3082c33..8ab03c34335d 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -1028,8 +1028,10 @@ bool Vectorizer::vectorizeStoreChain(
     unsigned NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
                                                    StackAdjustedAlignment,
                                                    DL, S0, nullptr, &DT);
-    if (NewAlign != 0)
+    if (NewAlign >= Alignment.value())
       Alignment = Align(NewAlign);
+    else
+      return false;
   }

   if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment.value(), AS)) {
@@ -1168,8 +1170,12 @@ bool Vectorizer::vectorizeLoadChain(
              vectorizeLoadChain(Chains.second, InstructionsProcessed);
     }

-    Alignment = getOrEnforceKnownAlignment(
-        L0->getPointerOperand(), StackAdjustedAlignment, DL, L0, nullptr, &DT);
+    unsigned NewAlign = getOrEnforceKnownAlignment(
+      L0->getPointerOperand(), StackAdjustedAlignment, DL, L0, nullptr, &DT);
+    if (NewAlign >= Alignment)
+      Alignment = NewAlign;
+    else
+      return false;
   }

   if (!TTI.isLegalToVectorizeLoadChain(SzInBytes, Alignment, AS)) {

diff  --git a/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll b/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
index 9ec8b7573ceb..0df32537808a 100644
--- a/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
+++ b/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
@@ -199,14 +199,17 @@ define amdgpu_kernel void @vload2_private(i16 addrspace(1)* nocapture readonly %
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    v_mov_b32_e32 v2, s4
 ; GCN-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NEXT:    global_load_ushort v4, v[2:3], off offset:4
-; GCN-NEXT:    global_load_dword v2, v[2:3], off
+; GCN-NEXT:    global_load_ushort v4, v[2:3], off
 ; GCN-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    buffer_store_short v2, off, s[0:3], s9 offset:4
-; GCN-NEXT:    buffer_store_short_d16_hi v2, off, s[0:3], s9 offset:6
-; GCN-NEXT:    buffer_store_short v4, off, s[0:3], s9 offset:8
+; GCN-NEXT:    buffer_store_short v4, off, s[0:3], s9 offset:4
+; GCN-NEXT:    global_load_ushort v4, v[2:3], off offset:2
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_short v4, off, s[0:3], s9 offset:6
+; GCN-NEXT:    global_load_ushort v2, v[2:3], off offset:4
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_short v2, off, s[0:3], s9 offset:8
 ; GCN-NEXT:    buffer_load_ushort v2, off, s[0:3], s9 offset:4
 ; GCN-NEXT:    buffer_load_ushort v4, off, s[0:3], s9 offset:6
 ; GCN-NEXT:    s_waitcnt vmcnt(1)

diff  --git a/llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.global.ll b/llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.global.ll
new file mode 100644
index 000000000000..34f8706ac66c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.global.ll
@@ -0,0 +1,328 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7-ALIGNED %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7-UNALIGNED %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+
+; Should not merge this to a dword load
+define i32 @global_load_2xi16_align2(i16 addrspace(1)* %p) #0 {
+; GFX7-ALIGNED-LABEL: global_load_2xi16_align2:
+; GFX7-ALIGNED:       ; %bb.0:
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v2, vcc, 2, v0
+; GFX7-ALIGNED-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; GFX7-ALIGNED-NEXT:    flat_load_ushort v0, v[0:1]
+; GFX7-ALIGNED-NEXT:    flat_load_ushort v1, v[2:3]
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-ALIGNED-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-ALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: global_load_2xi16_align2:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    v_add_i32_e32 v2, vcc, 2, v0
+; GFX7-UNALIGNED-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; GFX7-UNALIGNED-NEXT:    flat_load_ushort v0, v[0:1]
+; GFX7-UNALIGNED-NEXT:    flat_load_ushort v1, v[2:3]
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-UNALIGNED-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_load_2xi16_align2:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_ushort v2, v[0:1], off
+; GFX9-NEXT:    global_load_ushort v0, v[0:1], off offset:2
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_lshl_or_b32 v0, v0, 16, v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(1)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(1)* %p, align 2
+  %p.1 = load i16, i16 addrspace(1)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should not merge this to a dword store
+define amdgpu_kernel void @global_store_2xi16_align2(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
+; GFX7-ALIGNED-LABEL: global_store_2xi16_align2:
+; GFX7-ALIGNED:       ; %bb.0:
+; GFX7-ALIGNED-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v2, 1
+; GFX7-ALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-ALIGNED-NEXT:    s_add_u32 s2, s0, 2
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-ALIGNED-NEXT:    flat_store_short v[0:1], v2
+; GFX7-ALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v0, s2
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v2, 2
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v1, s3
+; GFX7-ALIGNED-NEXT:    flat_store_short v[0:1], v2
+; GFX7-ALIGNED-NEXT:    s_endpgm
+;
+; GFX7-UNALIGNED-LABEL: global_store_2xi16_align2:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v2, 1
+; GFX7-UNALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-UNALIGNED-NEXT:    s_add_u32 s2, s0, 2
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-UNALIGNED-NEXT:    flat_store_short v[0:1], v2
+; GFX7-UNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v0, s2
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v2, 2
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v1, s3
+; GFX7-UNALIGNED-NEXT:    flat_store_short v[0:1], v2
+; GFX7-UNALIGNED-NEXT:    s_endpgm
+;
+; GFX9-LABEL: global_store_2xi16_align2:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX9-NEXT:    v_mov_b32_e32 v2, 1
+; GFX9-NEXT:    v_mov_b32_e32 v3, 2
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    global_store_short v[0:1], v2, off
+; GFX9-NEXT:    global_store_short v[0:1], v3, off offset:2
+; GFX9-NEXT:    s_endpgm
+  %gep.r = getelementptr i16, i16 addrspace(1)* %r, i64 1
+  store i16 1, i16 addrspace(1)* %r, align 2
+  store i16 2, i16 addrspace(1)* %gep.r, align 2
+  ret void
+}
+
+; Should produce align 1 dword when legal
+define i32 @global_load_2xi16_align1(i16 addrspace(1)* %p) #0 {
+; GFX7-ALIGNED-LABEL: global_load_2xi16_align1:
+; GFX7-ALIGNED:       ; %bb.0:
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v2, vcc, 2, v0
+; GFX7-ALIGNED-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
+; GFX7-ALIGNED-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; GFX7-ALIGNED-NEXT:    flat_load_ubyte v6, v[0:1]
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v0, vcc, 3, v0
+; GFX7-ALIGNED-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX7-ALIGNED-NEXT:    flat_load_ubyte v2, v[2:3]
+; GFX7-ALIGNED-NEXT:    flat_load_ubyte v3, v[4:5]
+; GFX7-ALIGNED-NEXT:    flat_load_ubyte v0, v[0:1]
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(1) lgkmcnt(1)
+; GFX7-ALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 8, v3
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
+; GFX7-ALIGNED-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX7-ALIGNED-NEXT:    v_or_b32_e32 v1, v1, v6
+; GFX7-ALIGNED-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX7-ALIGNED-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX7-ALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: global_load_2xi16_align1:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    flat_load_dword v0, v[0:1]
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_load_2xi16_align1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v0, v[0:1], off
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0xffff
+; GFX9-NEXT:    s_mov_b32 s4, 0xffff
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_bfi_b32 v1, v1, 0, v0
+; GFX9-NEXT:    v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(1)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(1)* %p, align 1
+  %p.1 = load i16, i16 addrspace(1)* %gep.p, align 1
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should produce align 1 dword when legal
+define amdgpu_kernel void @global_store_2xi16_align1(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
+; GFX7-ALIGNED-LABEL: global_store_2xi16_align1:
+; GFX7-ALIGNED:       ; %bb.0:
+; GFX7-ALIGNED-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v4, 1
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v5, 0
+; GFX7-ALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    s_add_u32 s2, s0, 2
+; GFX7-ALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX7-ALIGNED-NEXT:    s_add_u32 s4, s0, 1
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-ALIGNED-NEXT:    s_addc_u32 s5, s1, 0
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-ALIGNED-NEXT:    s_add_u32 s0, s0, 3
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v2, s4
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v3, s5
+; GFX7-ALIGNED-NEXT:    flat_store_byte v[0:1], v4
+; GFX7-ALIGNED-NEXT:    flat_store_byte v[2:3], v5
+; GFX7-ALIGNED-NEXT:    s_addc_u32 s1, s1, 0
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v2, s2
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v4, 2
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v3, s3
+; GFX7-ALIGNED-NEXT:    flat_store_byte v[0:1], v5
+; GFX7-ALIGNED-NEXT:    flat_store_byte v[2:3], v4
+; GFX7-ALIGNED-NEXT:    s_endpgm
+;
+; GFX7-UNALIGNED-LABEL: global_store_2xi16_align1:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v2, 0x20001
+; GFX7-UNALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-UNALIGNED-NEXT:    flat_store_dword v[0:1], v2
+; GFX7-UNALIGNED-NEXT:    s_endpgm
+;
+; GFX9-LABEL: global_store_2xi16_align1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0x20001
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    global_store_dword v[0:1], v2, off
+; GFX9-NEXT:    s_endpgm
+  %gep.r = getelementptr i16, i16 addrspace(1)* %r, i64 1
+  store i16 1, i16 addrspace(1)* %r, align 1
+  store i16 2, i16 addrspace(1)* %gep.r, align 1
+  ret void
+}
+
+; Should merge this to a dword load
+define i32 @global_load_2xi16_align4(i16 addrspace(1)* %p) #0 {
+; GFX7-LABEL: load_2xi16_align4:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    flat_load_dword v0, v[0:1]
+; GFX7-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-ALIGNED-LABEL: global_load_2xi16_align4:
+; GFX7-ALIGNED:       ; %bb.0:
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    flat_load_dword v0, v[0:1]
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: global_load_2xi16_align4:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    flat_load_dword v0, v[0:1]
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: global_load_2xi16_align4:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dword v0, v[0:1], off
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0xffff
+; GFX9-NEXT:    s_mov_b32 s4, 0xffff
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_bfi_b32 v1, v1, 0, v0
+; GFX9-NEXT:    v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(1)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(1)* %p, align 4
+  %p.1 = load i16, i16 addrspace(1)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should merge this to a dword store
+define amdgpu_kernel void @global_store_2xi16_align4(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
+; GFX7-LABEL: global_store_2xi16_align4:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-NEXT:    v_mov_b32_e32 v2, 0x20001
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    flat_store_dword v[0:1], v2
+; GFX7-NEXT:    s_endpgm
+;
+; GFX7-ALIGNED-LABEL: global_store_2xi16_align4:
+; GFX7-ALIGNED:       ; %bb.0:
+; GFX7-ALIGNED-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v2, 0x20001
+; GFX7-ALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-ALIGNED-NEXT:    flat_store_dword v[0:1], v2
+; GFX7-ALIGNED-NEXT:    s_endpgm
+;
+; GFX7-UNALIGNED-LABEL: global_store_2xi16_align4:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v2, 0x20001
+; GFX7-UNALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-UNALIGNED-NEXT:    flat_store_dword v[0:1], v2
+; GFX7-UNALIGNED-NEXT:    s_endpgm
+;
+; GFX9-LABEL: global_store_2xi16_align4:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x8
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0x20001
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    global_store_dword v[0:1], v2, off
+; GFX9-NEXT:    s_endpgm
+  %gep.r = getelementptr i16, i16 addrspace(1)* %r, i64 1
+  store i16 1, i16 addrspace(1)* %r, align 4
+  store i16 2, i16 addrspace(1)* %gep.r, align 2
+  ret void
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll b/llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll
new file mode 100644
index 000000000000..0053d2f3019d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll
@@ -0,0 +1,245 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=-unaligned-scratch-access < %s | FileCheck -check-prefixes=GCN,GFX7-ALIGNED %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=+unaligned-scratch-access < %s | FileCheck -check-prefixes=GCN,GFX7-UNALIGNED %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=+unaligned-scratch-access < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+
+; Should not merge this to a dword load
+define i32 @private_load_2xi16_align2(i16 addrspace(5)* %p) #0 {
+; GFX7-ALIGNED-LABEL: private_load_2xi16_align2:
+; GFX7-ALIGNED:       ; %bb.0:
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v1, vcc, 2, v0
+; GFX7-ALIGNED-NEXT:    buffer_load_ushort v1, v1, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    buffer_load_ushort v0, v0, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-ALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-ALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: private_load_2xi16_align2:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    v_add_i32_e32 v1, vcc, 2, v0
+; GFX7-UNALIGNED-NEXT:    buffer_load_ushort v1, v1, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT:    buffer_load_ushort v0, v0, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-UNALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: private_load_2xi16_align2:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    buffer_load_ushort v1, v0, s[0:3], s33 offen
+; GFX9-NEXT:    buffer_load_ushort v0, v0, s[0:3], s33 offen offset:2
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_lshl_or_b32 v0, v0, 16, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(5)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(5)* %p, align 2
+  %p.1 = load i16, i16 addrspace(5)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should not merge this to a dword store
+define void @private_store_2xi16_align2(i16 addrspace(5)* %p, i16 addrspace(5)* %r) #0 {
+; GFX7-ALIGNED-LABEL: private_store_2xi16_align2:
+; GFX7-ALIGNED:       ; %bb.0:
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v3, 1
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v0, 2
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v2, vcc, 2, v1
+; GFX7-ALIGNED-NEXT:    buffer_store_short v3, v1, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    buffer_store_short v0, v2, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-ALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: private_store_2xi16_align2:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v3, 1
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v0, 2
+; GFX7-UNALIGNED-NEXT:    v_add_i32_e32 v2, vcc, 2, v1
+; GFX7-UNALIGNED-NEXT:    buffer_store_short v3, v1, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT:    buffer_store_short v0, v2, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: private_store_2xi16_align2:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 1
+; GFX9-NEXT:    buffer_store_short v0, v1, s[0:3], s33 offen
+; GFX9-NEXT:    v_mov_b32_e32 v0, 2
+; GFX9-NEXT:    buffer_store_short v0, v1, s[0:3], s33 offen offset:2
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep.r = getelementptr i16, i16 addrspace(5)* %r, i64 1
+  store i16 1, i16 addrspace(5)* %r, align 2
+  store i16 2, i16 addrspace(5)* %gep.r, align 2
+  ret void
+}
+
+; Should produce align 1 dword when legal
+define i32 @private_load_2xi16_align1(i16 addrspace(5)* %p) #0 {
+; GFX7-ALIGNED-LABEL: private_load_2xi16_align1:
+; GFX7-ALIGNED:       ; %bb.0:
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v1, vcc, 3, v0
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v2, vcc, 2, v0
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
+; GFX7-ALIGNED-NEXT:    buffer_load_ubyte v1, v1, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    buffer_load_ubyte v3, v3, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    buffer_load_ubyte v2, v2, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    buffer_load_ubyte v0, v0, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(3)
+; GFX7-ALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(2)
+; GFX7-ALIGNED-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-ALIGNED-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_or_b32_e32 v0, v3, v0
+; GFX7-ALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-ALIGNED-NEXT:    v_or_b32_e32 v0, v0, v1
+; GFX7-ALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: private_load_2xi16_align1:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    buffer_load_dword v0, v0, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: private_load_2xi16_align1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    buffer_load_dword v0, v0, s[0:3], s33 offen
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0xffff
+; GFX9-NEXT:    s_mov_b32 s4, 0xffff
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_bfi_b32 v1, v1, 0, v0
+; GFX9-NEXT:    v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(5)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(5)* %p, align 1
+  %p.1 = load i16, i16 addrspace(5)* %gep.p, align 1
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should produce align 1 dword when legal
+define void @private_store_2xi16_align1(i16 addrspace(5)* %p, i16 addrspace(5)* %r) #0 {
+; GFX7-ALIGNED-LABEL: private_store_2xi16_align1:
+; GFX7-ALIGNED:       ; %bb.0:
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v3, 1
+; GFX7-ALIGNED-NEXT:    buffer_store_byte v3, v1, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v2, vcc, 2, v1
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v3, vcc, 1, v1
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v4, 0
+; GFX7-ALIGNED-NEXT:    v_add_i32_e32 v1, vcc, 3, v1
+; GFX7-ALIGNED-NEXT:    v_mov_b32_e32 v0, 2
+; GFX7-ALIGNED-NEXT:    buffer_store_byte v4, v3, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    buffer_store_byte v4, v1, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    buffer_store_byte v0, v2, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-ALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: private_store_2xi16_align1:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v0, 0x20001
+; GFX7-UNALIGNED-NEXT:    buffer_store_dword v0, v1, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: private_store_2xi16_align1:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x20001
+; GFX9-NEXT:    buffer_store_dword v0, v1, s[0:3], s33 offen
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep.r = getelementptr i16, i16 addrspace(5)* %r, i64 1
+  store i16 1, i16 addrspace(5)* %r, align 1
+  store i16 2, i16 addrspace(5)* %gep.r, align 1
+  ret void
+}
+
+; Should merge this to a dword load
+define i32 @private_load_2xi16_align4(i16 addrspace(5)* %p) #0 {
+; GFX7-LABEL: load_2xi16_align4:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    flat_load_dword v0, v[0:1]
+; GFX7-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-ALIGNED-LABEL: private_load_2xi16_align4:
+; GFX7-ALIGNED:       ; %bb.0:
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-ALIGNED-NEXT:    buffer_load_dword v0, v0, s[0:3], s33 offen
+; GFX7-ALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-ALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: private_load_2xi16_align4:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    buffer_load_dword v0, v0, s[0:3], s33 offen
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: private_load_2xi16_align4:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    buffer_load_dword v0, v0, s[0:3], s33 offen
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0xffff
+; GFX9-NEXT:    s_mov_b32 s4, 0xffff
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_bfi_b32 v1, v1, 0, v0
+; GFX9-NEXT:    v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %gep.p = getelementptr i16, i16 addrspace(5)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(5)* %p, align 4
+  %p.1 = load i16, i16 addrspace(5)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; Should merge this to a dword store
+define void @private_store_2xi16_align4(i16 addrspace(5)* %p, i16 addrspace(5)* %r) #0 {
+; GFX7-LABEL: private_store_2xi16_align4:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GFX7-NEXT:    v_mov_b32_e32 v2, 0x20001
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    flat_store_dword v[0:1], v2
+; GFX7-NEXT:    s_endpgm
+;
+; GCN-LABEL: private_store_2xi16_align4:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, 0x20001
+; GCN-NEXT:    buffer_store_dword v0, v1, s[0:3], s33 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %gep.r = getelementptr i16, i16 addrspace(5)* %r, i64 1
+  store i16 1, i16 addrspace(5)* %r, align 4
+  store i16 2, i16 addrspace(5)* %gep.r, align 2
+  ret void
+}

diff  --git a/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll b/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll
index 9bcf35e13a1b..020f677ee3cf 100644
--- a/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll
@@ -665,4 +665,25 @@ define void @private_store_align2_f64(double addrspace(5)* %out, double %x) #0 {
   ret void
 }

+; Should not merge this to a dword store
+define amdgpu_kernel void @global_store_2xi16_align2(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
+  %gep.r = getelementptr i16, i16 addrspace(1)* %r, i64 1
+  %v = load i16, i16 addrspace(1)* %p, align 2
+  store i16 1, i16 addrspace(1)* %r, align 2
+  store i16 2, i16 addrspace(1)* %gep.r, align 2
+  ret void
+}
+
+; Should not merge this to a dword load
+define i32 @load_2xi16_align2(i16 addrspace(1)* %p) #0 {
+  %gep.p = getelementptr i16, i16 addrspace(1)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(1)* %p, align 2
+  %p.1 = load i16, i16 addrspace(1)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
 attributes #0 = { nounwind }

diff  --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
index b0dd5d185c77..9f85fec33ba1 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
@@ -207,4 +207,55 @@ define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i8() {
   ret void
 }

+; Make sure we don't think the alignment will increase if the base address isn't an alloca
+; ALL-LABEL: @private_store_2xi16_align2_not_alloca(
+; ALL: store i16
+; ALL: store i16
+define void @private_store_2xi16_align2_not_alloca(i16 addrspace(5)* %p, i16 addrspace(5)* %r) #0 {
+  %gep.r = getelementptr i16, i16 addrspace(5)* %r, i32 1
+  store i16 1, i16 addrspace(5)* %r, align 2
+  store i16 2, i16 addrspace(5)* %gep.r, align 2
+  ret void
+}
+
+; ALL-LABEL: @private_store_2xi16_align1_not_alloca(
+; ALIGNED: store i16
+; ALIGNED: store i16
+; UNALIGNED: store <2 x i16>
+define void @private_store_2xi16_align1_not_alloca(i16 addrspace(5)* %p, i16 addrspace(5)* %r) #0 {
+  %gep.r = getelementptr i16, i16 addrspace(5)* %r, i32 1
+  store i16 1, i16 addrspace(5)* %r, align 1
+  store i16 2, i16 addrspace(5)* %gep.r, align 1
+  ret void
+}
+
+; ALL-LABEL: @private_load_2xi16_align2_not_alloca(
+; ALL: load i16
+; ALL: load i16
+define i32 @private_load_2xi16_align2_not_alloca(i16 addrspace(5)* %p) #0 {
+  %gep.p = getelementptr i16, i16 addrspace(5)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(5)* %p, align 2
+  %p.1 = load i16, i16 addrspace(5)* %gep.p, align 2
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
+; ALL-LABEL: @private_load_2xi16_align1_not_alloca(
+; ALIGNED: load i16
+; ALIGNED: load i16
+; UNALIGNED: load <2 x i16>
+define i32 @private_load_2xi16_align1_not_alloca(i16 addrspace(5)* %p) #0 {
+  %gep.p = getelementptr i16, i16 addrspace(5)* %p, i64 1
+  %p.0 = load i16, i16 addrspace(5)* %p, align 1
+  %p.1 = load i16, i16 addrspace(5)* %gep.p, align 1
+  %zext.0 = zext i16 %p.0 to i32
+  %zext.1 = zext i16 %p.1 to i32
+  %shl.1 = shl i32 %zext.1, 16
+  %or = or i32 %zext.0, %shl.1
+  ret i32 %or
+}
+
 attributes #0 = { nounwind }

diff  --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
index 4292cbcec850..31a1c270bd0e 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores-private.ll
@@ -57,20 +57,10 @@ define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align1(
 }

 ; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v4i32_align2(
-; ALIGNED: store i32 9, i32 addrspace(5)* %out, align 2
-; ALIGNED: store i32 1, i32 addrspace(5)* %out.gep.1, align 2
-; ALIGNED: store i32 23, i32 addrspace(5)* %out.gep.2, align 2
-; ALIGNED: store i32 19, i32 addrspace(5)* %out.gep.3, align 2
-
-; ELT16-UNALIGNED: store <4 x i32> <i32 9, i32 1, i32 23, i32 19>, <4 x i32> addrspace(5)* %1, align 2
-
-; ELT8-UNALIGNED: store <2 x i32>
-; ELT8-UNALIGNED: store <2 x i32>
-
-; ELT4-UNALIGNED: store i32
-; ELT4-UNALIGNED: store i32
-; ELT4-UNALIGNED: store i32
-; ELT4-UNALIGNED: store i32
+; ALL: store i32
+; ALL: store i32
+; ALL: store i32
+; ALL: store i32
 define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i32_align2(i32 addrspace(5)* %out) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(5)* %out, i32 1
   %out.gep.2 = getelementptr i32, i32 addrspace(5)* %out, i32 2
@@ -127,10 +117,8 @@ define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16(i16 add
 }

 ; ALL-LABEL: @merge_private_store_4_vector_elts_loads_v2i16_align2(
-; ALIGNED: store i16
-; ALIGNED: store i16
-
-; UNALIGNED: store <2 x i16> <i16 9, i16 12>, <2 x i16> addrspace(5)* %1, align 2
+; ALL: store i16
+; ALL: store i16
 define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v2i16_align2(i16 addrspace(5)* %out) #0 {
   %out.gep.1 = getelementptr i16, i16 addrspace(5)* %out, i32 1


diff  --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
index 0d9a4184e718..8302ad9562f5 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
@@ -49,7 +49,8 @@ define amdgpu_kernel void @merge_global_store_2_constants_0_i16(i16 addrspace(1)
 }

 ; CHECK-LABEL: @merge_global_store_2_constants_i16_natural_align
-; CHECK: store <2 x i16>
+; CHECK: store i16
+; CHECK: store i16
 define amdgpu_kernel void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1

@@ -58,8 +59,19 @@ define amdgpu_kernel void @merge_global_store_2_constants_i16_natural_align(i16
   ret void
 }

+; CHECK-LABEL: @merge_global_store_2_constants_i16_align_1
+; CHECK: store <2 x i16>
+define amdgpu_kernel void @merge_global_store_2_constants_i16_align_1(i16 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
+
+  store i16 123, i16 addrspace(1)* %out.gep.1, align 1
+  store i16 456, i16 addrspace(1)* %out, align 1
+  ret void
+}
+
 ; CHECK-LABEL: @merge_global_store_2_constants_half_natural_align
-; CHECK: store <2 x half>
+; CHECK: store half
+; CHECK: store half
 define amdgpu_kernel void @merge_global_store_2_constants_half_natural_align(half addrspace(1)* %out) #0 {
   %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1

@@ -68,6 +80,16 @@ define amdgpu_kernel void @merge_global_store_2_constants_half_natural_align(hal
   ret void
 }

+; CHECK-LABEL: @merge_global_store_2_constants_half_align_1
+; CHECK: store <2 x half>
+define amdgpu_kernel void @merge_global_store_2_constants_half_align_1(half addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
+
+  store half 2.0, half addrspace(1)* %out.gep.1, align 1
+  store half 1.0, half addrspace(1)* %out, align 1
+  ret void
+}
+
 ; CHECK-LABEL: @merge_global_store_2_constants_i32
 ; CHECK: store <2 x i32> <i32 456, i32 123>, <2 x i32> addrspace(1)* %{{[0-9]+}}, align 4
 define amdgpu_kernel void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {


