[llvm] 2ca5523 - AMDGPU/GlobalISel: Fix 8-byte aligned, 96-bit scalar loads

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 15 08:33:23 PDT 2020


Author: Matt Arsenault
Date: 2020-06-15T11:33:16-04:00
New Revision: 2ca552322c29b61e8c20a0b31cf452de88d8af1c

URL: https://github.com/llvm/llvm-project/commit/2ca552322c29b61e8c20a0b31cf452de88d8af1c
DIFF: https://github.com/llvm/llvm-project/commit/2ca552322c29b61e8c20a0b31cf452de88d8af1c.diff

LOG: AMDGPU/GlobalISel: Fix 8-byte aligned, 96-bit scalar loads

These are legal since some subtargets can do a 96-bit load, but only as a
vector load. Once the load is known to be scalar and cannot be widened, it
needs to be broken down into 64-bit and 32-bit pieces. For 16-byte alignment,
widen to a 128-bit load instead.
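
For illustration, here is a minimal standalone C++ sketch of the decision this
patch makes for a 96-bit load once the pointer is known to be an SGPR. The
LoadPiece struct and planScalar96BitLoad helper are hypothetical names, not
the real LLT/MachineIRBuilder APIs; the actual implementation is in the diff
below.

#include <cassert>
#include <cstdint>
#include <vector>

// Toy description of one load to emit: its size in bits and its byte offset
// from the original pointer. Not an LLVM type.
struct LoadPiece {
  unsigned SizeInBits;
  unsigned ByteOffset;
};

// Sketch of the strategy: a 96-bit scalar (SGPR) load is not directly
// selectable, so either widen it to 128 bits (when the access is 16-byte
// aligned) or split it into a 64-bit piece at offset 0 plus a 32-bit piece
// at offset 8.
static std::vector<LoadPiece> planScalar96BitLoad(uint64_t AlignInBytes) {
  if (AlignInBytes >= 16)
    return {{128, 0}};         // Widened; extra 32 bits dropped with G_EXTRACT.
  return {{64, 0}, {32, 8}};   // Split; pieces recombined with G_INSERT.
}

int main() {
  // 8-byte aligned: two loads, 64 bits at offset 0 and 32 bits at offset 8.
  std::vector<LoadPiece> Split = planScalar96BitLoad(8);
  assert(Split.size() == 2 && Split[0].SizeInBits == 64 &&
         Split[1].SizeInBits == 32 && Split[1].ByteOffset == 8);

  // 16-byte aligned: a single widened 128-bit load.
  std::vector<LoadPiece> Widened = planScalar96BitLoad(16);
  assert(Widened.size() == 1 && Widened[0].SizeInBits == 128);
  return 0;
}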

Added: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
    llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 40f626649f04..b8f338df9213 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -1102,28 +1102,86 @@ void AMDGPURegisterBankInfo::constrainOpWithReadfirstlane(
   MI.getOperand(OpIdx).setReg(SGPR);
 }
 
-bool AMDGPURegisterBankInfo::applyMappingWideLoad(MachineInstr &MI,
+/// Split \p Ty into 2 pieces. The first will have \p FirstSize bits, and the
+/// rest will be in the remainder.
+static std::pair<LLT, LLT> splitUnequalType(LLT Ty, unsigned FirstSize) {
+  unsigned TotalSize = Ty.getSizeInBits();
+  if (!Ty.isVector())
+    return {LLT::scalar(FirstSize), LLT::scalar(TotalSize - FirstSize)};
+
+  LLT EltTy = Ty.getElementType();
+  unsigned EltSize = EltTy.getSizeInBits();
+  assert(FirstSize % EltSize == 0);
+
+  unsigned FirstPartNumElts = FirstSize / EltSize;
+  unsigned RemainderElts = (TotalSize - FirstSize) / EltSize;
+
+  return {LLT::scalarOrVector(FirstPartNumElts, EltTy),
+          LLT::scalarOrVector(RemainderElts, EltTy)};
+}
+
+static LLT widen96To128(LLT Ty) {
+  if (!Ty.isVector())
+    return LLT::scalar(128);
+
+  LLT EltTy = Ty.getElementType();
+  assert(128 % EltTy.getSizeInBits() == 0);
+  return LLT::vector(128 / EltTy.getSizeInBits(), EltTy);
+}
+
+bool AMDGPURegisterBankInfo::applyMappingLoad(MachineInstr &MI,
                         const AMDGPURegisterBankInfo::OperandsMapper &OpdMapper,
                                               MachineRegisterInfo &MRI) const {
   Register DstReg = MI.getOperand(0).getReg();
-  const LLT LoadTy =  MRI.getType(DstReg);
+  const LLT LoadTy = MRI.getType(DstReg);
   unsigned LoadSize = LoadTy.getSizeInBits();
   const unsigned MaxNonSmrdLoadSize = 128;
+
+  const RegisterBank *PtrBank =
+    OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;
+  if (PtrBank == &AMDGPU::SGPRRegBank) {
+    // If the pointer is an SGPR, we ordinarily have nothing to do.
+    if (LoadSize != 96)
+      return false;
+
+    MachineMemOperand *MMO = *MI.memoperands_begin();
+    Register PtrReg = MI.getOperand(1).getReg();
+    // 96-bit loads are only available for vector loads. We need to split this
+    // into 64-bit and 32-bit parts (unless we can widen to a 128-bit load).
+
+    MachineIRBuilder B(MI);
+    ApplyRegBankMapping O(*this, MRI, &AMDGPU::SGPRRegBank);
+    GISelObserverWrapper Observer(&O);
+    B.setChangeObserver(Observer);
+
+    if (MMO->getAlign() < Align(16)) {
+      LLT Part64, Part32;
+      std::tie(Part64, Part32) = splitUnequalType(LoadTy, 64);
+      auto Load0 = B.buildLoadFromOffset(Part64, PtrReg, *MMO, 0);
+      auto Load1 = B.buildLoadFromOffset(Part32, PtrReg, *MMO, 8);
+
+      auto Undef = B.buildUndef(LoadTy);
+      auto Ins0 = B.buildInsert(LoadTy, Undef, Load0, 0);
+      B.buildInsert(MI.getOperand(0), Ins0, Load1, 64);
+    } else {
+      LLT WiderTy = widen96To128(LoadTy);
+      auto WideLoad = B.buildLoadFromOffset(WiderTy, PtrReg, *MMO, 0);
+      B.buildExtract(MI.getOperand(0), WideLoad, 0);
+    }
+
+    MI.eraseFromParent();
+    return true;
+  }
+
   // 128-bit loads are supported for all instruction types.
   if (LoadSize <= MaxNonSmrdLoadSize)
     return false;
 
-  SmallVector<unsigned, 16> DefRegs(OpdMapper.getVRegs(0));
-  SmallVector<unsigned, 1> SrcRegs(OpdMapper.getVRegs(1));
+  SmallVector<Register, 16> DefRegs(OpdMapper.getVRegs(0));
+  SmallVector<Register, 1> SrcRegs(OpdMapper.getVRegs(1));
 
-  // If the pointer is an SGPR, we have nothing to do.
-  if (SrcRegs.empty()) {
-    const RegisterBank *PtrBank =
-      OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;
-    if (PtrBank == &AMDGPU::SGPRRegBank)
-      return false;
+  if (SrcRegs.empty())
     SrcRegs.push_back(MI.getOperand(1).getReg());
-  }
 
   assert(LoadSize % MaxNonSmrdLoadSize == 0);
 
@@ -3035,7 +3093,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
   case AMDGPU::G_LOAD:
   case AMDGPU::G_ZEXTLOAD:
   case AMDGPU::G_SEXTLOAD: {
-    if (applyMappingWideLoad(MI, OpdMapper, MRI))
+    if (applyMappingLoad(MI, OpdMapper, MRI))
       return;
     break;
   }
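
To make the type arithmetic of the two new static helpers concrete, the
standalone sketch below reimplements the same logic and checks it against the
types exercised by the new tests (s96, <3 x s32>, <6 x s16>). The harness
itself (includes, main) is illustrative only and assumes the 2020-era LLT API
from llvm/Support/LowLevelTypeImpl.h.

#include "llvm/Support/LowLevelTypeImpl.h"
#include <cassert>
#include <utility>

using llvm::LLT;

// Same logic as splitUnequalType in the hunk above: the first piece gets
// FirstSize bits, the remainder goes into the second piece.
static std::pair<LLT, LLT> splitUnequalType(LLT Ty, unsigned FirstSize) {
  unsigned TotalSize = Ty.getSizeInBits();
  if (!Ty.isVector())
    return {LLT::scalar(FirstSize), LLT::scalar(TotalSize - FirstSize)};
  LLT EltTy = Ty.getElementType();
  unsigned EltSize = EltTy.getSizeInBits();
  assert(FirstSize % EltSize == 0);
  return {LLT::scalarOrVector(FirstSize / EltSize, EltTy),
          LLT::scalarOrVector((TotalSize - FirstSize) / EltSize, EltTy)};
}

// Same logic as widen96To128 in the hunk above.
static LLT widen96To128(LLT Ty) {
  if (!Ty.isVector())
    return LLT::scalar(128);
  LLT EltTy = Ty.getElementType();
  assert(128 % EltTy.getSizeInBits() == 0);
  return LLT::vector(128 / EltTy.getSizeInBits(), EltTy);
}

int main() {
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);

  // s96 splits into (s64, s32) and widens to s128.
  assert(splitUnequalType(LLT::scalar(96), 64) ==
         std::make_pair(LLT::scalar(64), LLT::scalar(32)));
  assert(widen96To128(LLT::scalar(96)) == LLT::scalar(128));

  // <3 x s32> splits into (<2 x s32>, s32) and widens to <4 x s32>.
  assert(splitUnequalType(LLT::vector(3, S32), 64) ==
         std::make_pair(LLT::vector(2, S32), S32));
  assert(widen96To128(LLT::vector(3, S32)) == LLT::vector(4, S32));

  // <6 x s16> splits into (<4 x s16>, <2 x s16>) and widens to <8 x s16>.
  assert(splitUnequalType(LLT::vector(6, S16), 64) ==
         std::make_pair(LLT::vector(4, S16), LLT::vector(2, S16)));
  assert(widen96To128(LLT::vector(6, S16)) == LLT::vector(8, S16));
  return 0;
}

These are exactly the split and widened shapes checked by the regbankselect
MIR tests added further down.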

diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
index 6ee88944dbe7..8f38ec4eeb3a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
@@ -69,13 +69,12 @@ class AMDGPURegisterBankInfo : public AMDGPUGenRegisterBankInfo {
 
   void constrainOpWithReadfirstlane(MachineInstr &MI, MachineRegisterInfo &MRI,
                                     unsigned OpIdx) const;
-  bool applyMappingWideLoad(MachineInstr &MI,
-                            const OperandsMapper &OpdMapper,
-                            MachineRegisterInfo &MRI) const;
-
   bool applyMappingDynStackAlloc(MachineInstr &MI,
                                  const OperandsMapper &OpdMapper,
                                  MachineRegisterInfo &MRI) const;
+  bool applyMappingLoad(MachineInstr &MI,
+                        const OperandsMapper &OpdMapper,
+                        MachineRegisterInfo &MRI) const;
   bool
   applyMappingImage(MachineInstr &MI,
                     const OperandsMapper &OpdMapper,

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll
new file mode 100644
index 000000000000..4ca0a1aa049f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll
@@ -0,0 +1,823 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9-UNALIGNED %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX9,GFX9-NOUNALIGNED %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7,GFX7-UNALIGNED %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=hawaii -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefixes=GCN,GFX7,GFX7-NOUNALIGNED %s
+
+; FIXME:
+; XUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,GFX6 %s
+
+define <3 x i32> @v_load_constant_v3i32_align1(<3 x i32> addrspace(4)* %ptr) {
+; GFX9-UNALIGNED-LABEL: v_load_constant_v3i32_align1:
+; GFX9-UNALIGNED:       ; %bb.0:
+; GFX9-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-UNALIGNED-NEXT:    global_load_dwordx3 v[0:2], v[0:1], off
+; GFX9-UNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-NOUNALIGNED-LABEL: v_load_constant_v3i32_align1:
+; GFX9-NOUNALIGNED:       ; %bb.0:
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NOUNALIGNED-NEXT:    v_add_co_u32_e32 v2, vcc, 11, v0
+; GFX9-NOUNALIGNED-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v6, v[2:3], off offset:-6
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v7, v[2:3], off offset:-5
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v8, v[2:3], off offset:-4
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v9, v[2:3], off offset:-3
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v10, v[2:3], off offset:-2
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v11, v[2:3], off offset:-1
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v12, v[2:3], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v0, v[0:1], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v1, v[2:3], off offset:-10
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v13, v[2:3], off offset:-9
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v14, v[2:3], off offset:-8
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v2, v[2:3], off offset:-7
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v4, 0xff
+; GFX9-NOUNALIGNED-NEXT:    s_movk_i32 s4, 0xff
+; GFX9-NOUNALIGNED-NEXT:    s_mov_b32 s5, 8
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v5, 8
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(11)
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_sdwa v6, v5, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(10)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v7, v7, v4
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(9)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v8, v8, v4
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(7)
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_sdwa v5, v5, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(6)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v10, v11, v4
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(5)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v11, v12, v4
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(3)
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(2)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v3, s4, v13
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v13, s4, v14
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v2, v2, v4, v6
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v0, v0, s4, v1
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v3, 24, v13
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v6, 16, v7
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v7, 24, v8
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v4, v9, v4, v5
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v5, 16, v10
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v8, 24, v11
+; GFX9-NOUNALIGNED-NEXT:    v_or3_b32 v0, v0, v1, v3
+; GFX9-NOUNALIGNED-NEXT:    v_or3_b32 v1, v2, v6, v7
+; GFX9-NOUNALIGNED-NEXT:    v_or3_b32 v2, v4, v5, v8
+; GFX9-NOUNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: v_load_constant_v3i32_align1:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    s_mov_b32 s6, 0
+; GFX7-UNALIGNED-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-UNALIGNED-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-UNALIGNED-NEXT:    buffer_load_dwordx3 v[0:2], v[0:1], s[4:7], 0 addr64
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-NOUNALIGNED-LABEL: v_load_constant_v3i32_align1:
+; GFX7-NOUNALIGNED:       ; %bb.0:
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b32 s6, 0
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v3, v[0:1], s[4:7], 0 addr64 offset:5
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v4, v[0:1], s[4:7], 0 addr64 offset:6
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v5, v[0:1], s[4:7], 0 addr64 offset:7
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v6, v[0:1], s[4:7], 0 addr64 offset:8
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v7, v[0:1], s[4:7], 0 addr64 offset:9
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v8, v[0:1], s[4:7], 0 addr64 offset:10
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v9, v[0:1], s[4:7], 0 addr64 offset:11
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v10, v[0:1], s[4:7], 0 addr64
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v11, v[0:1], s[4:7], 0 addr64 offset:1
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v12, v[0:1], s[4:7], 0 addr64 offset:2
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v13, v[0:1], s[4:7], 0 addr64 offset:3
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v0, v[0:1], s[4:7], 0 addr64 offset:4
+; GFX7-NOUNALIGNED-NEXT:    v_mov_b32_e32 v2, 0xff
+; GFX7-NOUNALIGNED-NEXT:    s_movk_i32 s8, 0xff
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(11)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v3, v3, v2
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(10)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v4, v4, v2
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(9)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v5, v5, v2
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(8)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v6, v6, v2
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(7)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v7, v7, v2
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(6)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v8, v8, v2
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(5)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v2, v9, v2
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v1, s8, v10
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(3)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v10, s8, v11
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(2)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v11, s8, v12
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v9, 8, v10
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v0, s8, v0
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v7, 8, v7
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v0, v0, v3
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v12, s8, v13
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v10, 16, v11
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v1, v1, v9
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v8, 16, v8
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v3, v6, v7
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v11, 24, v12
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v5, 24, v5
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v3, v3, v8
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v1, v1, v10
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v4, v0, v4
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v0, v1, v11
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v1, v4, v5
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v2, v3, v2
+; GFX7-NOUNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, align 1
+  ret <3 x i32> %load
+}
+
+define <3 x i32> @v_load_constant_v3i32_align2(<3 x i32> addrspace(4)* %ptr) {
+; GFX9-UNALIGNED-LABEL: v_load_constant_v3i32_align2:
+; GFX9-UNALIGNED:       ; %bb.0:
+; GFX9-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-UNALIGNED-NEXT:    global_load_dwordx3 v[0:2], v[0:1], off
+; GFX9-UNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-NOUNALIGNED-LABEL: v_load_constant_v3i32_align2:
+; GFX9-NOUNALIGNED:       ; %bb.0:
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NOUNALIGNED-NEXT:    v_add_co_u32_e32 v2, vcc, 10, v0
+; GFX9-NOUNALIGNED-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v5, v[2:3], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v0, v[0:1], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v1, v[2:3], off offset:-8
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v6, v[2:3], off offset:-6
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v7, v[2:3], off offset:-4
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v2, v[2:3], off offset:-2
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v4, 0xffff
+; GFX9-NOUNALIGNED-NEXT:    s_mov_b32 s4, 0xffff
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(5)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v5, v5, v4
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(3)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v3, v7, v4
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v0, v0, s4, v1
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v1, v6, v4, v3
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v2, v2, v4, v5
+; GFX9-NOUNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-UNALIGNED-LABEL: v_load_constant_v3i32_align2:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    s_mov_b32 s6, 0
+; GFX7-UNALIGNED-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-UNALIGNED-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-UNALIGNED-NEXT:    buffer_load_dwordx3 v[0:2], v[0:1], s[4:7], 0 addr64
+; GFX7-UNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-UNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-NOUNALIGNED-LABEL: v_load_constant_v3i32_align2:
+; GFX7-NOUNALIGNED:       ; %bb.0:
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b32 s6, 0
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v2, v[0:1], s[4:7], 0 addr64 offset:10
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v3, v[0:1], s[4:7], 0 addr64
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v4, v[0:1], s[4:7], 0 addr64 offset:2
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v5, v[0:1], s[4:7], 0 addr64 offset:4
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v6, v[0:1], s[4:7], 0 addr64 offset:6
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v0, v[0:1], s[4:7], 0 addr64 offset:8
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b32 s8, 0xffff
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v1, s8, v3
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(3)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v3, s8, v4
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(2)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v4, s8, v5
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v5, s8, v6
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v6, s8, v0
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v0, s8, v2
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v0, v1, v2
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v1, v4, v3
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v2, v6, v5
+; GFX7-NOUNALIGNED-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, align 2
+  ret <3 x i32> %load
+}
+
+define <3 x i32> @v_load_constant_v3i32_align4(<3 x i32> addrspace(4)* %ptr) {
+; GFX9-LABEL: v_load_constant_v3i32_align4:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx3 v[0:2], v[0:1], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_load_constant_v3i32_align4:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 s6, 0
+; GFX7-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:    buffer_load_dwordx3 v[0:2], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, align 4
+  ret <3 x i32> %load
+}
+
+define i96 @v_load_constant_i96_align8(i96 addrspace(4)* %ptr) {
+; GFX9-LABEL: v_load_constant_i96_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx3 v[0:2], v[0:1], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_load_constant_i96_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 s6, 0
+; GFX7-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:    buffer_load_dwordx3 v[0:2], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+  %load = load i96, i96 addrspace(4)* %ptr, align 8
+  ret i96 %load
+}
+
+define <3 x i32> @v_load_constant_v3i32_align8(<3 x i32> addrspace(4)* %ptr) {
+; GFX9-LABEL: v_load_constant_v3i32_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx3 v[0:2], v[0:1], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_load_constant_v3i32_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 s6, 0
+; GFX7-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:    buffer_load_dwordx3 v[0:2], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, align 8
+  ret <3 x i32> %load
+}
+
+define <6 x i16> @v_load_constant_v6i16_align8(<6 x i16> addrspace(4)* %ptr) {
+; GFX9-LABEL: v_load_constant_v6i16_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx3 v[0:2], v[0:1], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_load_constant_v6i16_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 s6, 0
+; GFX7-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:    buffer_load_dwordx3 v[6:8], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    v_lshrrev_b32_e32 v1, 16, v6
+; GFX7-NEXT:    v_lshrrev_b32_e32 v3, 16, v7
+; GFX7-NEXT:    v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT:    v_mov_b32_e32 v0, v6
+; GFX7-NEXT:    v_mov_b32_e32 v2, v7
+; GFX7-NEXT:    v_mov_b32_e32 v4, v8
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <6 x i16>, <6 x i16> addrspace(4)* %ptr, align 8
+  ret <6 x i16> %load
+}
+
+define <12 x i8> @v_load_constant_v12i8_align8(<12 x i8> addrspace(4)* %ptr) {
+; GFX9-LABEL: v_load_constant_v12i8_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx3 v[0:2], v[0:1], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_lshrrev_b32_e32 v13, 8, v0
+; GFX9-NEXT:    v_lshrrev_b32_e32 v12, 16, v0
+; GFX9-NEXT:    v_lshrrev_b32_e32 v3, 24, v0
+; GFX9-NEXT:    v_lshrrev_b32_e32 v5, 8, v1
+; GFX9-NEXT:    v_lshrrev_b32_e32 v6, 16, v1
+; GFX9-NEXT:    v_lshrrev_b32_e32 v7, 24, v1
+; GFX9-NEXT:    v_mov_b32_e32 v4, v1
+; GFX9-NEXT:    v_lshrrev_b32_e32 v9, 8, v2
+; GFX9-NEXT:    v_lshrrev_b32_e32 v10, 16, v2
+; GFX9-NEXT:    v_lshrrev_b32_e32 v11, 24, v2
+; GFX9-NEXT:    v_mov_b32_e32 v8, v2
+; GFX9-NEXT:    v_mov_b32_e32 v1, v13
+; GFX9-NEXT:    v_mov_b32_e32 v2, v12
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_load_constant_v12i8_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 s6, 0
+; GFX7-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:    buffer_load_dwordx3 v[0:2], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    v_lshrrev_b32_e32 v13, 8, v0
+; GFX7-NEXT:    v_lshrrev_b32_e32 v12, 16, v0
+; GFX7-NEXT:    v_lshrrev_b32_e32 v3, 24, v0
+; GFX7-NEXT:    v_lshrrev_b32_e32 v5, 8, v1
+; GFX7-NEXT:    v_lshrrev_b32_e32 v6, 16, v1
+; GFX7-NEXT:    v_lshrrev_b32_e32 v7, 24, v1
+; GFX7-NEXT:    v_mov_b32_e32 v4, v1
+; GFX7-NEXT:    v_lshrrev_b32_e32 v9, 8, v2
+; GFX7-NEXT:    v_lshrrev_b32_e32 v10, 16, v2
+; GFX7-NEXT:    v_lshrrev_b32_e32 v11, 24, v2
+; GFX7-NEXT:    v_mov_b32_e32 v8, v2
+; GFX7-NEXT:    v_mov_b32_e32 v1, v13
+; GFX7-NEXT:    v_mov_b32_e32 v2, v12
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <12 x i8>, <12 x i8> addrspace(4)* %ptr, align 8
+  ret <12 x i8> %load
+}
+
+define <3 x i32> @v_load_constant_v3i32_align16(<3 x i32> addrspace(4)* %ptr) {
+; GFX9-LABEL: v_load_constant_v3i32_align16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    global_load_dwordx3 v[0:2], v[0:1], off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_load_constant_v3i32_align16:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_mov_b32 s6, 0
+; GFX7-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:    buffer_load_dwordx3 v[0:2], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+  %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, align 16
+  ret <3 x i32> %load
+}
+
+define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align1(<3 x i32> addrspace(4)* inreg %ptr) {
+; GFX9-UNALIGNED-LABEL: s_load_constant_v3i32_align1:
+; GFX9-UNALIGNED:       ; %bb.0:
+; GFX9-UNALIGNED-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-UNALIGNED-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-UNALIGNED-NEXT:    global_load_dwordx3 v[0:2], v[0:1], off
+; GFX9-UNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-UNALIGNED-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-UNALIGNED-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX9-UNALIGNED-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX9-UNALIGNED-NEXT:    ; return to shader part epilog
+;
+; GFX9-NOUNALIGNED-LABEL: s_load_constant_v3i32_align1:
+; GFX9-NOUNALIGNED:       ; %bb.0:
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 1
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v3, s3
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 2
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v5, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v4, s2
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 3
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v7, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v6, s2
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 4
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v9, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v8, s2
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 5
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v11, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v10, s2
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 6
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v13, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v12, s2
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 7
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v15, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v14, s2
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 8
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v17, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v16, s2
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 9
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v20, v[10:11], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v12, v[12:13], off
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v11, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v10, s2
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 10
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s0, s0, 11
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v13, v[14:15], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v14, v[16:17], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v15, v[10:11], off
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v11, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v10, s2
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s1, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v16, v[10:11], off
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v11, s1
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v10, s0
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v10, v[10:11], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v0, v[0:1], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v1, v[2:3], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v2, v[4:5], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v3, v[6:7], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ubyte v4, v[8:9], off
+; GFX9-NOUNALIGNED-NEXT:    s_mov_b32 s5, 8
+; GFX9-NOUNALIGNED-NEXT:    s_movk_i32 s4, 0xff
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v18, 0xff
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v19, 8
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(3)
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v0, v0, s4, v1
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(2)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v1, s4, v2
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v2, s4, v3
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NOUNALIGNED-NEXT:    v_or3_b32 v0, v0, v1, v2
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v1, v12, v18
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v2, v13, v18
+; GFX9-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_sdwa v0, v19, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v0, v4, v18, v0
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NOUNALIGNED-NEXT:    v_or3_b32 v1, v0, v1, v2
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_sdwa v0, v19, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v2, v10, v18
+; GFX9-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v1, v16, v18
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v0, v14, v18, v0
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v2, 24, v2
+; GFX9-NOUNALIGNED-NEXT:    v_or3_b32 v2, v0, v1, v2
+; GFX9-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX9-NOUNALIGNED-NEXT:    ; return to shader part epilog
+;
+; GFX7-UNALIGNED-LABEL: s_load_constant_v3i32_align1:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7-UNALIGNED-NEXT:    s_load_dword s0, s[0:1], 0x2
+; GFX7-UNALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v0, s6
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v2, s8
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v2, s0
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v1, s7
+; GFX7-UNALIGNED-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-UNALIGNED-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX7-UNALIGNED-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX7-UNALIGNED-NEXT:    ; return to shader part epilog
+;
+; GFX7-NOUNALIGNED-LABEL: s_load_constant_v3i32_align1:
+; GFX7-NOUNALIGNED:       ; %bb.0:
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b32 s2, -1
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b32 s3, 0xf000
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v1, off, s[0:3], 0 offset:5
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v2, off, s[0:3], 0 offset:6
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v3, off, s[0:3], 0 offset:7
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v4, off, s[0:3], 0 offset:8
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v5, off, s[0:3], 0 offset:9
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v6, off, s[0:3], 0 offset:10
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v7, off, s[0:3], 0 offset:11
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v8, off, s[0:3], 0
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v9, off, s[0:3], 0 offset:1
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v10, off, s[0:3], 0 offset:2
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v11, off, s[0:3], 0 offset:3
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ubyte v12, off, s[0:3], 0 offset:4
+; GFX7-NOUNALIGNED-NEXT:    v_mov_b32_e32 v0, 0xff
+; GFX7-NOUNALIGNED-NEXT:    s_movk_i32 s4, 0xff
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(11)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v1, v1, v0
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(10)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v2, v2, v0
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(9)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v3, v3, v0
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(8)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v4, v4, v0
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(7)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v5, v5, v0
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(6)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v6, v6, v0
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v5, 8, v5
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v8, s4, v8
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(3)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v9, s4, v9
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v0, v7, v0
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(2)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v10, s4, v10
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v7, 8, v9
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v11, s4, v11
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v12, s4, v12
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v9, 16, v10
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v10, 24, v11
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v11, 24, v0
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v0, v8, v7
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v1, v12, v1
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v6, 16, v6
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v4, v4, v5
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v0, v0, v9
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v1, v1, v2
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v2, v4, v6
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v3, 24, v3
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v0, v0, v10
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v1, v1, v3
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v2, v2, v11
+; GFX7-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX7-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX7-NOUNALIGNED-NEXT:    ; return to shader part epilog
+  %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, align 1
+  ret <3 x i32> %load
+}
+
+define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align2(<3 x i32> addrspace(4)* inreg %ptr) {
+; GFX9-UNALIGNED-LABEL: s_load_constant_v3i32_align2:
+; GFX9-UNALIGNED:       ; %bb.0:
+; GFX9-UNALIGNED-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-UNALIGNED-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-UNALIGNED-NEXT:    global_load_dwordx3 v[0:2], v[0:1], off
+; GFX9-UNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-UNALIGNED-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-UNALIGNED-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX9-UNALIGNED-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX9-UNALIGNED-NEXT:    ; return to shader part epilog
+;
+; GFX9-NOUNALIGNED-LABEL: s_load_constant_v3i32_align2:
+; GFX9-NOUNALIGNED:       ; %bb.0:
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 2
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v3, s3
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 4
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v5, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v4, s2
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 6
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v7, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v6, s2
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s2, s0, 8
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s3, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NOUNALIGNED-NEXT:    s_add_u32 s0, s0, 10
+; GFX9-NOUNALIGNED-NEXT:    s_addc_u32 s1, s1, 0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v11, s1
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v9, s3
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v10, s0
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v8, s2
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v10, v[10:11], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v0, v[0:1], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v1, v[2:3], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v2, v[4:5], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v3, v[6:7], off
+; GFX9-NOUNALIGNED-NEXT:    global_load_ushort v4, v[8:9], off
+; GFX9-NOUNALIGNED-NEXT:    s_mov_b32 s4, 0xffff
+; GFX9-NOUNALIGNED-NEXT:    v_mov_b32_e32 v12, 0xffff
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(3)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v0, v0, s4, v1
+; GFX9-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v0, v3, v12
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v1, v2, v12, v0
+; GFX9-NOUNALIGNED-NEXT:    v_and_b32_e32 v0, v10, v12
+; GFX9-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
+; GFX9-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NOUNALIGNED-NEXT:    v_and_or_b32 v2, v4, v12, v0
+; GFX9-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX9-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX9-NOUNALIGNED-NEXT:    ; return to shader part epilog
+;
+; GFX7-UNALIGNED-LABEL: s_load_constant_v3i32_align2:
+; GFX7-UNALIGNED:       ; %bb.0:
+; GFX7-UNALIGNED-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7-UNALIGNED-NEXT:    s_load_dword s0, s[0:1], 0x2
+; GFX7-UNALIGNED-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v0, s6
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v2, s8
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v2, s0
+; GFX7-UNALIGNED-NEXT:    v_mov_b32_e32 v1, s7
+; GFX7-UNALIGNED-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-UNALIGNED-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX7-UNALIGNED-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX7-UNALIGNED-NEXT:    ; return to shader part epilog
+;
+; GFX7-NOUNALIGNED-LABEL: s_load_constant_v3i32_align2:
+; GFX7-NOUNALIGNED:       ; %bb.0:
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b32 s2, -1
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b32 s3, 0xf000
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v0, off, s[0:3], 0 offset:10
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v1, off, s[0:3], 0
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v2, off, s[0:3], 0 offset:2
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v3, off, s[0:3], 0 offset:4
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v4, off, s[0:3], 0 offset:6
+; GFX7-NOUNALIGNED-NEXT:    buffer_load_ushort v5, off, s[0:3], 0 offset:8
+; GFX7-NOUNALIGNED-NEXT:    s_mov_b32 s4, 0xffff
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(5)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v0, s4, v0
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(4)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v1, s4, v1
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(3)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v2, s4, v2
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(2)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v3, s4, v3
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(1)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v4, s4, v4
+; GFX7-NOUNALIGNED-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NOUNALIGNED-NEXT:    v_and_b32_e32 v5, s4, v5
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v6, 16, v0
+; GFX7-NOUNALIGNED-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v0, v1, v2
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v1, v3, v4
+; GFX7-NOUNALIGNED-NEXT:    v_or_b32_e32 v2, v5, v6
+; GFX7-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX7-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX7-NOUNALIGNED-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX7-NOUNALIGNED-NEXT:    ; return to shader part epilog
+  %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, align 2
+  ret <3 x i32> %load
+}
+
+define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align4(<3 x i32> addrspace(4)* inreg %ptr) {
+; GFX9-LABEL: s_load_constant_v3i32_align4:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s2, s0
+; GFX9-NEXT:    s_mov_b32 s3, s1
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x0
+; GFX9-NEXT:    s_load_dword s2, s[2:3], 0x8
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX7-LABEL: s_load_constant_v3i32_align4:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_mov_b32 s2, s0
+; GFX7-NEXT:    s_mov_b32 s3, s1
+; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x0
+; GFX7-NEXT:    s_load_dword s2, s[2:3], 0x2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ; return to shader part epilog
+  %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, align 4
+  ret <3 x i32> %load
+}
+
+define amdgpu_ps i96 @s_load_constant_i96_align8(i96 addrspace(4)* inreg %ptr) {
+; GFX9-LABEL: s_load_constant_i96_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s2, s0
+; GFX9-NEXT:    s_mov_b32 s3, s1
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x0
+; GFX9-NEXT:    s_load_dword s2, s[2:3], 0x8
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX7-LABEL: s_load_constant_i96_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_mov_b32 s2, s0
+; GFX7-NEXT:    s_mov_b32 s3, s1
+; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x0
+; GFX7-NEXT:    s_load_dword s2, s[2:3], 0x2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ; return to shader part epilog
+  %load = load i96, i96 addrspace(4)* %ptr, align 8
+  ret i96 %load
+}
+
+define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align8(<3 x i32> addrspace(4)* inreg %ptr) {
+; GFX9-LABEL: s_load_constant_v3i32_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s2, s0
+; GFX9-NEXT:    s_mov_b32 s3, s1
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x0
+; GFX9-NEXT:    s_load_dword s2, s[2:3], 0x8
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX7-LABEL: s_load_constant_v3i32_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_mov_b32 s2, s0
+; GFX7-NEXT:    s_mov_b32 s3, s1
+; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x0
+; GFX7-NEXT:    s_load_dword s2, s[2:3], 0x2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ; return to shader part epilog
+  %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, align 8
+  ret <3 x i32> %load
+}
+
+define amdgpu_ps <3 x i32> @s_load_constant_v6i16_align8(<6 x i16> addrspace(4)* inreg %ptr) {
+; GFX9-LABEL: s_load_constant_v6i16_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_mov_b32 s2, s0
+; GFX9-NEXT:    s_mov_b32 s3, s1
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x0
+; GFX9-NEXT:    s_load_dword s2, s[2:3], 0x8
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX7-LABEL: s_load_constant_v6i16_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_mov_b32 s2, s0
+; GFX7-NEXT:    s_mov_b32 s3, s1
+; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x0
+; GFX7-NEXT:    s_load_dword s2, s[2:3], 0x2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    ; return to shader part epilog
+  %load = load <6 x i16>, <6 x i16> addrspace(4)* %ptr, align 8
+  %cast = bitcast <6 x i16> %load to <3 x i32>
+  ret <3 x i32> %cast
+}
+
+define amdgpu_ps <12 x i8> @s_load_constant_v12i8_align8(<12 x i8> addrspace(4)* inreg %ptr) {
+; GFX9-LABEL: s_load_constant_v12i8_align8:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x0
+; GFX9-NEXT:    s_load_dword s8, s[0:1], 0x8
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshr_b32 s1, s12, 8
+; GFX9-NEXT:    s_lshr_b32 s2, s12, 16
+; GFX9-NEXT:    s_lshr_b32 s3, s12, 24
+; GFX9-NEXT:    s_lshr_b32 s5, s13, 8
+; GFX9-NEXT:    s_lshr_b32 s6, s13, 16
+; GFX9-NEXT:    s_lshr_b32 s7, s13, 24
+; GFX9-NEXT:    s_lshr_b32 s9, s8, 8
+; GFX9-NEXT:    s_lshr_b32 s10, s8, 16
+; GFX9-NEXT:    s_lshr_b32 s11, s8, 24
+; GFX9-NEXT:    s_mov_b32 s0, s12
+; GFX9-NEXT:    s_mov_b32 s4, s13
+; GFX9-NEXT:    ; return to shader part epilog
+;
+; GFX7-LABEL: s_load_constant_v12i8_align8:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x0
+; GFX7-NEXT:    s_load_dword s8, s[0:1], 0x2
+; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX7-NEXT:    s_lshr_b32 s1, s12, 8
+; GFX7-NEXT:    s_lshr_b32 s2, s12, 16
+; GFX7-NEXT:    s_lshr_b32 s3, s12, 24
+; GFX7-NEXT:    s_lshr_b32 s5, s13, 8
+; GFX7-NEXT:    s_lshr_b32 s6, s13, 16
+; GFX7-NEXT:    s_lshr_b32 s7, s13, 24
+; GFX7-NEXT:    s_lshr_b32 s9, s8, 8
+; GFX7-NEXT:    s_lshr_b32 s10, s8, 16
+; GFX7-NEXT:    s_lshr_b32 s11, s8, 24
+; GFX7-NEXT:    s_mov_b32 s0, s12
+; GFX7-NEXT:    s_mov_b32 s4, s13
+; GFX7-NEXT:    ; return to shader part epilog
+  %load = load <12 x i8>, <12 x i8> addrspace(4)* %ptr, align 8
+  ret <12 x i8> %load
+}
+
+define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align16(<3 x i32> addrspace(4)* inreg %ptr) {
+; GCN-LABEL: s_load_constant_v3i32_align16:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    ; return to shader part epilog
+  %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, align 16
+  ret <3 x i32> %load
+}

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
index 292a1541347b..2ca83f20710f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
@@ -1,6 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -amdgpu-global-isel-new-legality -mtriple=amdgcn-amd-amdhsa -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
-# RUN: llc -amdgpu-global-isel-new-legality -mtriple=amdgcn-amd-amdhsa -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+# RUN: llc -amdgpu-global-isel-new-legality -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -amdgpu-global-isel-new-legality -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
 
 --- |
   define amdgpu_kernel void @load_global_v8i32_non_uniform(<8 x i32> addrspace(1)* %in) {
@@ -90,6 +90,18 @@
   define amdgpu_kernel void @load_constant_v8i32_vgpr_crash() { ret void }
   define amdgpu_kernel void @load_constant_v8i32_vgpr_crash_loop_phi() { ret void }
 
+  define amdgpu_kernel void @load_constant_v3i32_align4() { ret void }
+  define amdgpu_kernel void @load_constant_v3i32_align8() { ret void }
+  define amdgpu_kernel void @load_constant_v3i32_align16() { ret void }
+
+  define amdgpu_kernel void @load_constant_v6i16_align4() { ret void }
+  define amdgpu_kernel void @load_constant_v6i16_align8() { ret void }
+  define amdgpu_kernel void @load_constant_v6i16_align16() { ret void }
+
+  define amdgpu_kernel void @load_constant_i96_align4() { ret void }
+  define amdgpu_kernel void @load_constant_i96_align8() { ret void }
+  define amdgpu_kernel void @load_constant_i96_align16() { ret void }
+
   declare i32 @llvm.amdgcn.workitem.id.x() #0
   attributes #0 = { nounwind readnone }
 ...
@@ -648,3 +660,186 @@ body: |
     %4:_(p4) = COPY %1
     G_BR %bb.1
 ...
+
+---
+name: load_constant_v3i32_align4
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: load_constant_v3i32_align4
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
+    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(<2 x s32>) = G_LOAD [[COPY]](p4) :: (invariant load 8, align 4, addrspace 4)
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, addrspace 4)
+    ; CHECK: [[DEF:%[0-9]+]]:sgpr(<3 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: [[INSERT:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0
+    ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
+    ; CHECK: S_ENDPGM 0, implicit [[INSERT1]](<3 x s32>)
+    %0:_(p4) = COPY $sgpr0_sgpr1
+    %1:_(<3 x s32>) = G_LOAD %0 :: (invariant load 12, addrspace 4, align 4)
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: load_constant_v3i32_align8
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: load_constant_v3i32_align8
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
+    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(<2 x s32>) = G_LOAD [[COPY]](p4) :: (invariant load 8, addrspace 4)
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, align 8, addrspace 4)
+    ; CHECK: [[DEF:%[0-9]+]]:sgpr(<3 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: [[INSERT:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[DEF]], [[LOAD]](<2 x s32>), 0
+    ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(<3 x s32>) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
+    ; CHECK: S_ENDPGM 0, implicit [[INSERT1]](<3 x s32>)
+    %0:_(p4) = COPY $sgpr0_sgpr1
+    %1:_(<3 x s32>) = G_LOAD %0 :: (invariant load 12, addrspace 4, align 8)
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: load_constant_v3i32_align16
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: load_constant_v3i32_align16
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
+    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(<4 x s32>) = G_LOAD [[COPY]](p4) :: (invariant load 16, addrspace 4)
+    ; CHECK: [[EXTRACT:%[0-9]+]]:sgpr(<3 x s32>) = G_EXTRACT [[LOAD]](<4 x s32>), 0
+    ; CHECK: S_ENDPGM 0, implicit [[EXTRACT]](<3 x s32>)
+    %0:_(p4) = COPY $sgpr0_sgpr1
+    %1:_(<3 x s32>) = G_LOAD %0 :: (invariant load 12, addrspace 4, align 16)
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: load_constant_v6i16_align4
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: load_constant_v6i16_align4
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
+    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(<4 x s16>) = G_LOAD [[COPY]](p4) :: (invariant load 8, align 4, addrspace 4)
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(<2 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, addrspace 4)
+    ; CHECK: [[DEF:%[0-9]+]]:sgpr(<6 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: [[INSERT:%[0-9]+]]:sgpr(<6 x s16>) = G_INSERT [[DEF]], [[LOAD]](<4 x s16>), 0
+    ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(<6 x s16>) = G_INSERT [[INSERT]], [[LOAD1]](<2 x s16>), 64
+    ; CHECK: S_ENDPGM 0, implicit [[INSERT1]](<6 x s16>)
+    %0:_(p4) = COPY $sgpr0_sgpr1
+    %1:_(<6 x s16>) = G_LOAD %0 :: (invariant load 12, addrspace 4, align 4)
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: load_constant_v6i16_align8
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: load_constant_v6i16_align8
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
+    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(<4 x s16>) = G_LOAD [[COPY]](p4) :: (invariant load 8, addrspace 4)
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(<2 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, align 8, addrspace 4)
+    ; CHECK: [[DEF:%[0-9]+]]:sgpr(<6 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: [[INSERT:%[0-9]+]]:sgpr(<6 x s16>) = G_INSERT [[DEF]], [[LOAD]](<4 x s16>), 0
+    ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(<6 x s16>) = G_INSERT [[INSERT]], [[LOAD1]](<2 x s16>), 64
+    ; CHECK: S_ENDPGM 0, implicit [[INSERT1]](<6 x s16>)
+    %0:_(p4) = COPY $sgpr0_sgpr1
+    %1:_(<6 x s16>) = G_LOAD %0 :: (invariant load 12, addrspace 4, align 8)
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: load_constant_v6i16_align16
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: load_constant_v6i16_align16
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
+    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(<8 x s16>) = G_LOAD [[COPY]](p4) :: (invariant load 16, addrspace 4)
+    ; CHECK: [[EXTRACT:%[0-9]+]]:sgpr(<6 x s16>) = G_EXTRACT [[LOAD]](<8 x s16>), 0
+    ; CHECK: S_ENDPGM 0, implicit [[EXTRACT]](<6 x s16>)
+    %0:_(p4) = COPY $sgpr0_sgpr1
+    %1:_(<6 x s16>) = G_LOAD %0 :: (invariant load 12, addrspace 4, align 16)
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: load_constant_i96_align4
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: load_constant_i96_align4
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
+    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(s64) = G_LOAD [[COPY]](p4) :: (invariant load 8, align 4, addrspace 4)
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, addrspace 4)
+    ; CHECK: [[DEF:%[0-9]+]]:sgpr(s96) = G_IMPLICIT_DEF
+    ; CHECK: [[INSERT:%[0-9]+]]:sgpr(s96) = G_INSERT [[DEF]], [[LOAD]](s64), 0
+    ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(s96) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
+    ; CHECK: S_ENDPGM 0, implicit [[INSERT1]](s96)
+    %0:_(p4) = COPY $sgpr0_sgpr1
+    %1:_(s96) = G_LOAD %0 :: (invariant load 12, addrspace 4, align 4)
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: load_constant_i96_align8
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: load_constant_i96_align8
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
+    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(s64) = G_LOAD [[COPY]](p4) :: (invariant load 8, addrspace 4)
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 8
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: [[LOAD1:%[0-9]+]]:sgpr(s32) = G_LOAD [[PTR_ADD]](p4) :: (invariant load 4 + 8, align 8, addrspace 4)
+    ; CHECK: [[DEF:%[0-9]+]]:sgpr(s96) = G_IMPLICIT_DEF
+    ; CHECK: [[INSERT:%[0-9]+]]:sgpr(s96) = G_INSERT [[DEF]], [[LOAD]](s64), 0
+    ; CHECK: [[INSERT1:%[0-9]+]]:sgpr(s96) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
+    ; CHECK: S_ENDPGM 0, implicit [[INSERT1]](s96)
+    %0:_(p4) = COPY $sgpr0_sgpr1
+    %1:_(s96) = G_LOAD %0 :: (invariant load 12, addrspace 4, align 8)
+    S_ENDPGM 0, implicit %1
+...
+
+---
+name: load_constant_i96_align16
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0_sgpr1
+    ; CHECK-LABEL: name: load_constant_i96_align16
+    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
+    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(s128) = G_LOAD [[COPY]](p4) :: (invariant load 16, addrspace 4)
+    ; CHECK: [[EXTRACT:%[0-9]+]]:sgpr(s96) = G_EXTRACT [[LOAD]](s128), 0
+    ; CHECK: S_ENDPGM 0, implicit [[EXTRACT]](s96)
+    %0:_(p4) = COPY $sgpr0_sgpr1
+    %1:_(s96) = G_LOAD %0 :: (invariant load 12, addrspace 4, align 16)
+    S_ENDPGM 0, implicit %1
+...


        

