[llvm] [AMDGPU][GFX12] Restrict scalar subword loads to PAL (PR #117576)

Juan Manuel Martinez Caamaño via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 25 08:40:35 PST 2024


https://github.com/jmmartinez created https://github.com/llvm/llvm-project/pull/117576

On gfx12, s_buffer_load_(i/u)(8/16) have a hw-bug that is triggered when:
* the stride is not a multiple of 4, or
* the stride is 0 and the num-records is not a multiple of 4

At the moment, these instructions are only generated for PAL.
But in this case, it is guaranteed that the buffer's stride/num-records are
aligned to 4.

This patch restricts the emission of scalar subword loads to PAL, where
the bug would never be triggered, and avoids them in HSA (where the bug
could be triggered, but these loads are not used).

Solves SWDEV-498239

>From 4c0eb52dab6d11c6e029d01f56229985aa8ad60d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= <juamarti at amd.com>
Date: Mon, 25 Nov 2024 11:32:39 +0100
Subject: [PATCH 1/3] [AMDGPU][GFX12] Pre-commit tests: Restrict scalar subword
 loads to PAL

---
 llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll b/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll
index 020c9dc130bb2a..94e04d66e770ad 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,DAG %s
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -global-isel=1 < %s | FileCheck -check-prefixes=GCN,GISEL %s
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,DAG %s
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -verify-machineinstrs -global-isel=1 < %s | FileCheck -check-prefixes=GCN,GISEL %s
 
 define amdgpu_ps void @test_s_load_i8(ptr addrspace(4) inreg %in, ptr addrspace(1) %out) {
 ; GCN-LABEL: test_s_load_i8:

>From ea2cb848eedea4d0ab5e52ea807c0c8b1c79b07e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= <juamarti at amd.com>
Date: Mon, 25 Nov 2024 14:47:23 +0100
Subject: [PATCH 2/3] [AMDGPU][DAG][GFX12] Restrict scalar subword loads to PAL

On gfx12, s_buffer_load_(i/u)(8/16) have a hw-bug that is triggered when:
* the stride is not a multiple of 4, or
* the stride is 0 and the num-records is not a multiple of 4

At the moment, these instructions are only generated for PAL.
But in this case, it is guaranteed that the buffer's stride/num-records are
aligned to 4.

This patch restricts the emission of scalar subword loads to PAL, where
the bug would never be triggered, and avoids them in HSA (where the bug
could be triggered, but these loads are not used).

Solves SWDEV-498239
---
 llvm/lib/Target/AMDGPU/GCNSubtarget.h         |  15 +
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  81 ++--
 .../AMDGPU/gfx12_scalar_subword_loads.ll      | 396 +++++++++++++-----
 3 files changed, 356 insertions(+), 136 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 18219174b16b1e..15d67d478465d6 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -460,6 +460,21 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
 
   bool hasScalarSubwordLoads() const { return getGeneration() >= GFX12; }
 
+  bool hasScalarSubwordBufferLoads() const {
+    Generation Gen = getGeneration();
+
+    // On gfx12, s_buffer_load_(i/u)(8/16) have a hw-bug that is triggered when:
+    // * the stride is not a multiple of 4, or
+    // * the stride is 0 and the num-records is not a multiple of 4
+    // At the moment, llvm.amdgcn.s.buffer.load intrinsics are only generated
+    // for PAL by LLPC. In this case, it is guaranteed that the buffer's
+    // stride/num-records are aligned to 4. In the HSA/Mesa case, we simply
+    // avoid these instructions.
+    if (Gen == GFX12)
+      return isAmdPalOS();
+    return hasScalarSubwordLoads();
+  }
+
   TrapHandlerAbi getTrapHandlerAbi() const {
     return isAmdHsaOS() ? TrapHandlerAbi::AMDHSA : TrapHandlerAbi::NONE;
   }
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f3b5e6985e8e0d..5b9bcfe8e39628 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6430,7 +6430,7 @@ void SITargetLowering::ReplaceNodeResults(SDNode *N,
               MachineMemOperand::MOInvariant,
           VT.getStoreSize(), Alignment);
       SDValue LoadVal;
-      if (!Offset->isDivergent()) {
+      if (!Offset->isDivergent() && Subtarget->hasScalarSubwordBufferLoads()) {
         SDValue Ops[] = {Rsrc, // source register
                          Offset, CachePolicy};
         SDValue BufferLoad =
@@ -8359,52 +8359,57 @@ SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
           MachineMemOperand::MOInvariant,
       VT.getStoreSize(), Alignment);
 
-  if (!Offset->isDivergent()) {
-    SDValue Ops[] = {Rsrc, Offset, CachePolicy};
-
-    // Lower llvm.amdgcn.s.buffer.load.{i16, u16} intrinsics. Initially, the
-    // s_buffer_load_u16 instruction is emitted for both signed and unsigned
-    // loads. Later, DAG combiner tries to combine s_buffer_load_u16 with sext
-    // and generates s_buffer_load_i16 (performSignExtendInRegCombine).
-    if (VT == MVT::i16 && Subtarget->hasScalarSubwordLoads()) {
-      SDValue BufferLoad =
-          DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD_USHORT, DL,
-                                  DAG.getVTList(MVT::i32), Ops, VT, MMO);
+  // We have a divergent offset. Emit a MUBUF buffer load instead. We can
+  // assume that the buffer is unswizzled.
+  SDValue BufferLoadOps[] = {
+      DAG.getEntryNode(),                    // Chain
+      Rsrc,                                  // rsrc
+      DAG.getConstant(0, DL, MVT::i32),      // vindex
+      {},                                    // voffset
+      {},                                    // soffset
+      {},                                    // offset
+      CachePolicy,                           // cachepolicy
+      DAG.getTargetConstant(0, DL, MVT::i1), // idxen
+  };
+
+  if (VT == MVT::i16 && Subtarget->hasScalarSubwordLoads()) {
+    if (!Offset->isDivergent() && Subtarget->hasScalarSubwordBufferLoads()) {
+      // Lower llvm.amdgcn.s.buffer.load.{i16, u16} intrinsics. Initially, the
+      // s_buffer_load_u16 instruction is emitted for both signed and unsigned
+      // loads. Later, DAG combiner tries to combine s_buffer_load_u16 with sext
+      // and generates s_buffer_load_i16 (performSignExtendInRegCombine).
+      SDValue SBufferLoadOps[] = {Rsrc, Offset, CachePolicy};
+      SDValue BufferLoad = DAG.getMemIntrinsicNode(
+          AMDGPUISD::SBUFFER_LOAD_USHORT, DL, DAG.getVTList(MVT::i32),
+          SBufferLoadOps, VT, MMO);
       return DAG.getNode(ISD::TRUNCATE, DL, VT, BufferLoad);
     }
 
+    // If s_buffer_load_u16/u8 is not supported by the platform (gfx12, when we
+    // cannot ensure the buffer's num-records/stride is properly aligned),
+    // lower to a buffer_load_u8/u16 instead.
+    setBufferOffsets(Offset, DAG, &BufferLoadOps[3], Align(4));
+    return handleByteShortBufferLoads(DAG, VT, DL, BufferLoadOps, MMO);
+  }
+
+  if (!Offset->isDivergent()) {
+    SDValue SBufferLoadOps[] = {Rsrc, Offset, CachePolicy};
+
     // Widen vec3 load to vec4.
     if (VT.isVector() && VT.getVectorNumElements() == 3 &&
         !Subtarget->hasScalarDwordx3Loads()) {
       EVT WidenedVT =
           EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
       auto WidenedOp = DAG.getMemIntrinsicNode(
-          AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT,
-          MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize()));
+          AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), SBufferLoadOps,
+          WidenedVT, MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize()));
       auto Subvector = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp,
                                    DAG.getVectorIdxConstant(0, DL));
       return Subvector;
     }
 
     return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL,
-                                   DAG.getVTList(VT), Ops, VT, MMO);
-  }
-
-  // We have a divergent offset. Emit a MUBUF buffer load instead. We can
-  // assume that the buffer is unswizzled.
-  SDValue Ops[] = {
-      DAG.getEntryNode(),                    // Chain
-      Rsrc,                                  // rsrc
-      DAG.getConstant(0, DL, MVT::i32),      // vindex
-      {},                                    // voffset
-      {},                                    // soffset
-      {},                                    // offset
-      CachePolicy,                           // cachepolicy
-      DAG.getTargetConstant(0, DL, MVT::i1), // idxen
-  };
-  if (VT == MVT::i16 && Subtarget->hasScalarSubwordLoads()) {
-    setBufferOffsets(Offset, DAG, &Ops[3], Align(4));
-    return handleByteShortBufferLoads(DAG, VT, DL, Ops, MMO);
+                                   DAG.getVTList(VT), SBufferLoadOps, VT, MMO);
   }
 
   SmallVector<SDValue, 4> Loads;
@@ -8423,14 +8428,14 @@ SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
 
   // Use the alignment to ensure that the required offsets will fit into the
   // immediate offsets.
-  setBufferOffsets(Offset, DAG, &Ops[3],
+  setBufferOffsets(Offset, DAG, &BufferLoadOps[3],
                    NumLoads > 1 ? Align(16 * NumLoads) : Align(4));
 
-  uint64_t InstOffset = Ops[5]->getAsZExtVal();
+  uint64_t InstOffset = BufferLoadOps[5]->getAsZExtVal();
   for (unsigned i = 0; i < NumLoads; ++i) {
-    Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
-    Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops,
-                                        LoadVT, MMO, DAG));
+    BufferLoadOps[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32);
+    Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList,
+                                        BufferLoadOps, LoadVT, MMO, DAG));
   }
 
   if (NumElts == 8 || NumElts == 16)
@@ -12672,7 +12677,7 @@ SITargetLowering::performSignExtendInRegCombine(SDNode *N,
         VTSign->getVT() == MVT::i8) ||
        (Src.getOpcode() == AMDGPUISD::SBUFFER_LOAD_USHORT &&
         VTSign->getVT() == MVT::i16))) {
-    assert(Subtarget->hasScalarSubwordLoads() &&
+    assert(Subtarget->hasScalarSubwordBufferLoads() &&
            "s_buffer_load_{u8, i8} are supported "
            "in GFX12 (or newer) architectures.");
     EVT VT = Src.getValueType();
diff --git a/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll b/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll
index 94e04d66e770ad..3926c33f496875 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,DAG %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,DAG,DAG-DEFAULT %s
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -global-isel=1 < %s | FileCheck -check-prefixes=GCN,GISEL %s
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,DAG %s
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,DAG,DAG-PAL %s
 ; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -verify-machineinstrs -global-isel=1 < %s | FileCheck -check-prefixes=GCN,GISEL %s
 
 define amdgpu_ps void @test_s_load_i8(ptr addrspace(4) inreg %in, ptr addrspace(1) %out) {
@@ -421,13 +421,28 @@ define amdgpu_ps void @test_s_load_u16_divergent(ptr addrspace(4) inreg %in, i32
 }
 
 define amdgpu_ps void @s_buffer_load_byte_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out) {
-; GCN-LABEL: s_buffer_load_byte_imm_offset:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_i8 s0, s[0:3], 0x4
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_byte_imm_offset:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    buffer_load_i8 v2, off, s[0:3], null offset:4
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_byte_imm_offset:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_i8 s0, s[0:3], 0x4
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_byte_imm_offset:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_i8 s0, s[0:3], 0x4
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i8 @llvm.amdgcn.s.buffer.load.i8(<4 x i32> %src, i32 4, i32 0)
   %sext = sext i8 %ld to i32
@@ -436,13 +451,29 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_byte_sgpr(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %offset) {
-; GCN-LABEL: s_buffer_load_byte_sgpr:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x0
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_byte_sgpr:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DAG-DEFAULT-NEXT:    buffer_load_i8 v2, v2, s[0:3], null offen
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_byte_sgpr:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x0
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_byte_sgpr:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x0
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i8 @llvm.amdgcn.s.buffer.load.i8(<4 x i32> %src, i32 %offset, i32 0)
   %sext = sext i8 %ld to i32
@@ -451,13 +482,29 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_byte_sgpr_or_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %in) {
-; GCN-LABEL: s_buffer_load_byte_sgpr_or_imm_offset:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x64
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_byte_sgpr_or_imm_offset:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DAG-DEFAULT-NEXT:    buffer_load_i8 v2, v2, s[0:3], null offen offset:100
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_byte_sgpr_or_imm_offset:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x64
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_byte_sgpr_or_imm_offset:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x64
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %off = add nuw nsw i32 %in, 100
   %ld = call i8 @llvm.amdgcn.s.buffer.load.i8(<4 x i32> %src, i32 %off, i32 0)
@@ -488,15 +535,32 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ubyte_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out) {
-; GCN-LABEL: s_buffer_load_ubyte_imm_offset:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_u8 s0, s[0:3], 0x4
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    s_and_b32 s0, s0, 0xff
-; GCN-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_ubyte_imm_offset:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    buffer_load_u8 v2, off, s[0:3], null offset:4
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_ubyte_imm_offset:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_u8 s0, s[0:3], 0x4
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    s_and_b32 s0, s0, 0xff
+; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_ubyte_imm_offset:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_u8 s0, s[0:3], 0x4
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xff
+; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i8 @llvm.amdgcn.s.buffer.load.u8(<4 x i32> %src, i32 4, i32 0)
   %zext = zext i8 %ld to i32
@@ -505,15 +569,33 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ubyte_sgpr(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %offset) {
-; GCN-LABEL: s_buffer_load_ubyte_sgpr:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x0
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    s_and_b32 s0, s0, 0xff
-; GCN-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_ubyte_sgpr:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DAG-DEFAULT-NEXT:    buffer_load_u8 v2, v2, s[0:3], null offen
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_ubyte_sgpr:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x0
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    s_and_b32 s0, s0, 0xff
+; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_ubyte_sgpr:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x0
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xff
+; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i8 @llvm.amdgcn.s.buffer.load.u8(<4 x i32> %src, i32 %offset, i32 0)
   %zext = zext i8 %ld to i32
@@ -522,15 +604,33 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ubyte_sgpr_or_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %in) {
-; GCN-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x64
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    s_and_b32 s0, s0, 0xff
-; GCN-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DAG-DEFAULT-NEXT:    buffer_load_u8 v2, v2, s[0:3], null offen offset:100
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x64
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    s_and_b32 s0, s0, 0xff
+; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x64
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xff
+; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %off = add nuw nsw i32 %in, 100
   %ld = call i8 @llvm.amdgcn.s.buffer.load.u8(<4 x i32> %src, i32 %off, i32 0)
@@ -562,13 +662,28 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_short_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out) {
-; GCN-LABEL: s_buffer_load_short_imm_offset:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_i16 s0, s[0:3], 0x4
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_short_imm_offset:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    buffer_load_i16 v2, off, s[0:3], null offset:4
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_short_imm_offset:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_i16 s0, s[0:3], 0x4
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_short_imm_offset:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_i16 s0, s[0:3], 0x4
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i16 @llvm.amdgcn.s.buffer.load.i16(<4 x i32> %src, i32 4, i32 0)
   %sext = sext i16 %ld to i32
@@ -577,13 +692,29 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_short_sgpr(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %offset) {
-; GCN-LABEL: s_buffer_load_short_sgpr:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x0
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_short_sgpr:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DAG-DEFAULT-NEXT:    buffer_load_i16 v2, v2, s[0:3], null offen
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_short_sgpr:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x0
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_short_sgpr:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x0
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i16 @llvm.amdgcn.s.buffer.load.i16(<4 x i32> %src, i32 %offset, i32 0)
   %sext = sext i16 %ld to i32
@@ -592,13 +723,29 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_short_sgpr_or_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %in) {
-; GCN-LABEL: s_buffer_load_short_sgpr_or_imm_offset:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x64
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_short_sgpr_or_imm_offset:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DAG-DEFAULT-NEXT:    buffer_load_i16 v2, v2, s[0:3], null offen offset:100
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_short_sgpr_or_imm_offset:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x64
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_short_sgpr_or_imm_offset:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x64
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %off = add nuw nsw i32 %in, 100
   %ld = call i16 @llvm.amdgcn.s.buffer.load.i16(<4 x i32> %src, i32 %off, i32 0)
@@ -629,15 +776,32 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ushort_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out) {
-; GCN-LABEL: s_buffer_load_ushort_imm_offset:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_u16 s0, s[0:3], 0x4
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    s_and_b32 s0, s0, 0xffff
-; GCN-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_ushort_imm_offset:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    buffer_load_u16 v2, off, s[0:3], null offset:4
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_ushort_imm_offset:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_u16 s0, s[0:3], 0x4
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    s_and_b32 s0, s0, 0xffff
+; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_ushort_imm_offset:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_u16 s0, s[0:3], 0x4
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xffff
+; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i16 @llvm.amdgcn.s.buffer.load.u16(<4 x i32> %src, i32 4, i32 0)
   %zext = zext i16 %ld to i32
@@ -646,15 +810,33 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ushort_sgpr(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %offset) {
-; GCN-LABEL: s_buffer_load_ushort_sgpr:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x0
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    s_and_b32 s0, s0, 0xffff
-; GCN-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_ushort_sgpr:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DAG-DEFAULT-NEXT:    buffer_load_u16 v2, v2, s[0:3], null offen
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_ushort_sgpr:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x0
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    s_and_b32 s0, s0, 0xffff
+; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_ushort_sgpr:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x0
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xffff
+; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i16 @llvm.amdgcn.s.buffer.load.u16(<4 x i32> %src, i32 %offset, i32 0)
   %zext = zext i16 %ld to i32
@@ -663,15 +845,33 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ushort_sgpr_or_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %in) {
-; GCN-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset:
-; GCN:       ; %bb.0: ; %main_body
-; GCN-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x64
-; GCN-NEXT:    s_wait_kmcnt 0x0
-; GCN-NEXT:    s_and_b32 s0, s0, 0xffff
-; GCN-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    global_store_b32 v[0:1], v2, off
-; GCN-NEXT:    s_endpgm
+; DAG-DEFAULT-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset:
+; DAG-DEFAULT:       ; %bb.0: ; %main_body
+; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DAG-DEFAULT-NEXT:    buffer_load_u16 v2, v2, s[0:3], null offen offset:100
+; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-DEFAULT-NEXT:    s_endpgm
+;
+; GISEL-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset:
+; GISEL:       ; %bb.0: ; %main_body
+; GISEL-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x64
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    s_and_b32 s0, s0, 0xffff
+; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT:    v_mov_b32_e32 v2, s0
+; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; GISEL-NEXT:    s_endpgm
+;
+; DAG-PAL-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset:
+; DAG-PAL:       ; %bb.0: ; %main_body
+; DAG-PAL-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x64
+; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
+; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xffff
+; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
+; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; DAG-PAL-NEXT:    s_endpgm
 main_body:
   %off = add nuw nsw i32 %in, 100
   %ld = call i16 @llvm.amdgcn.s.buffer.load.u16(<4 x i32> %src, i32 %off, i32 0)

>From 6fb9a6fe799b312e8ebaee4bf3d904c7f64199fc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= <juamarti at amd.com>
Date: Mon, 25 Nov 2024 14:54:40 +0100
Subject: [PATCH 3/3] [AMDGPU][ISel][GFX12] Restrict scalar subword loads to
 PAL

On gfx12, s_buffer_load_(i/u)(8/16) have a hw-bug that is triggered
when:
* the stride is not a multiple of 4, or
* the stride is 0 and the num-records is not a multiple of 4

At the moment, these instructions are only generated for PAL.
But in this case, it is guaranteed that the buffer's stride/num-records
are aligned to 4.

This patch restricts the emission of scalar subword loads to PAL, where
the bug would never be triggered, and avoids them in HSA (where the bug
could be triggered, but these loads are not used).

Solves SWDEV-498239
---
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp |  38 +-
 .../AMDGPU/gfx12_scalar_subword_loads.ll      | 572 ++++++++----------
 2 files changed, 275 insertions(+), 335 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 9bf1f281c32a09..bf60ae32b46108 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -6803,8 +6803,36 @@ bool AMDGPULegalizerInfo::legalizeSBufferLoad(LegalizerHelper &Helper,
   unsigned Size = Ty.getSizeInBits();
   MachineFunction &MF = B.getMF();
   unsigned Opc = 0;
+
+  const unsigned MemSize = (Size + 7) / 8;
+  const Align MemAlign = B.getDataLayout().getABITypeAlign(
+      getTypeForLLT(Ty, MF.getFunction().getContext()));
+
+  // FIXME: When intrinsic definition is fixed, this should have an MMO already.
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      MachinePointerInfo(),
+      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+          MachineMemOperand::MOInvariant,
+      MemSize, MemAlign);
+
   if (Size < 32 && ST.hasScalarSubwordLoads()) {
     assert(Size == 8 || Size == 16);
+    if (!ST.hasScalarSubwordBufferLoads()) {
+      // fallback to S_BUFFER_LOAD_UBYTE/USHORT
+      MI.getOperand(1).setIntrinsicID(Intrinsic::amdgcn_raw_buffer_load);
+
+      Register ZeroReg =
+          B.getMRI()->createGenericVirtualRegister(LLT::scalar(32));
+      B.buildConstant(ZeroReg, 0);
+
+      MI.insert(MI.operands_begin() + 4,
+                {MachineOperand::CreateReg(ZeroReg, false)});
+
+      MI.addMemOperand(MF, MMO);
+      Observer.changedInstr(MI);
+      return true;
+    }
+
     Opc = Size == 8 ? AMDGPU::G_AMDGPU_S_BUFFER_LOAD_UBYTE
                     : AMDGPU::G_AMDGPU_S_BUFFER_LOAD_USHORT;
     // The 8-bit and 16-bit scalar buffer load instructions have 32-bit
@@ -6834,16 +6862,8 @@ bool AMDGPULegalizerInfo::legalizeSBufferLoad(LegalizerHelper &Helper,
   MI.setDesc(B.getTII().get(Opc));
   MI.removeOperand(1); // Remove intrinsic ID
 
-  // FIXME: When intrinsic definition is fixed, this should have an MMO already.
-  const unsigned MemSize = (Size + 7) / 8;
-  const Align MemAlign = B.getDataLayout().getABITypeAlign(
-      getTypeForLLT(Ty, MF.getFunction().getContext()));
-  MachineMemOperand *MMO = MF.getMachineMemOperand(
-      MachinePointerInfo(),
-      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
-          MachineMemOperand::MOInvariant,
-      MemSize, MemAlign);
   MI.addMemOperand(MF, MMO);
+
   if (Dst != OrigDst) {
     MI.getOperand(0).setReg(Dst);
     B.setInsertPt(B.getMBB(), ++B.getInsertPt());
diff --git a/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll b/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll
index 3926c33f496875..ad89b1f91143b4 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx12_scalar_subword_loads.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,DAG,DAG-DEFAULT %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -global-isel=1 < %s | FileCheck -check-prefixes=GCN,GISEL %s
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,DAG,DAG-PAL %s
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -verify-machineinstrs -global-isel=1 < %s | FileCheck -check-prefixes=GCN,GISEL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,DAG,DEFAULT %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -global-isel=1 < %s | FileCheck -check-prefixes=GCN,GISEL,DEFAULT %s
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,DAG,PAL %s
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 -verify-machineinstrs -global-isel=1 < %s | FileCheck -check-prefixes=GCN,GISEL,PAL,PAL-GISEL %s
 
 define amdgpu_ps void @test_s_load_i8(ptr addrspace(4) inreg %in, ptr addrspace(1) %out) {
 ; GCN-LABEL: test_s_load_i8:
@@ -421,28 +421,20 @@ define amdgpu_ps void @test_s_load_u16_divergent(ptr addrspace(4) inreg %in, i32
 }
 
 define amdgpu_ps void @s_buffer_load_byte_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out) {
-; DAG-DEFAULT-LABEL: s_buffer_load_byte_imm_offset:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    buffer_load_i8 v2, off, s[0:3], null offset:4
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_byte_imm_offset:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_i8 s0, s[0:3], 0x4
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_byte_imm_offset:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_i8 s0, s[0:3], 0x4
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_byte_imm_offset:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    buffer_load_i8 v2, off, s[0:3], null offset:4
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_byte_imm_offset:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_i8 s0, s[0:3], 0x4
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i8 @llvm.amdgcn.s.buffer.load.i8(<4 x i32> %src, i32 4, i32 0)
   %sext = sext i8 %ld to i32
@@ -451,29 +443,21 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_byte_sgpr(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %offset) {
-; DAG-DEFAULT-LABEL: s_buffer_load_byte_sgpr:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
-; DAG-DEFAULT-NEXT:    buffer_load_i8 v2, v2, s[0:3], null offen
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_byte_sgpr:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x0
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_byte_sgpr:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x0
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_byte_sgpr:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DEFAULT-NEXT:    buffer_load_i8 v2, v2, s[0:3], null offen
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_byte_sgpr:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x0
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i8 @llvm.amdgcn.s.buffer.load.i8(<4 x i32> %src, i32 %offset, i32 0)
   %sext = sext i8 %ld to i32
@@ -482,29 +466,21 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_byte_sgpr_or_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %in) {
-; DAG-DEFAULT-LABEL: s_buffer_load_byte_sgpr_or_imm_offset:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
-; DAG-DEFAULT-NEXT:    buffer_load_i8 v2, v2, s[0:3], null offen offset:100
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_byte_sgpr_or_imm_offset:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x64
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_byte_sgpr_or_imm_offset:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x64
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_byte_sgpr_or_imm_offset:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DEFAULT-NEXT:    buffer_load_i8 v2, v2, s[0:3], null offen offset:100
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_byte_sgpr_or_imm_offset:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_i8 s0, s[0:3], s4 offset:0x64
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %off = add nuw nsw i32 %in, 100
   %ld = call i8 @llvm.amdgcn.s.buffer.load.i8(<4 x i32> %src, i32 %off, i32 0)
@@ -521,12 +497,19 @@ define amdgpu_ps void @s_buffer_load_byte_sgpr_or_imm_offset_divergent(<4 x i32>
 ; DAG-NEXT:    global_store_b32 v[0:1], v2, off
 ; DAG-NEXT:    s_endpgm
 ;
-; GISEL-LABEL: s_buffer_load_byte_sgpr_or_imm_offset_divergent:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    buffer_load_b32 v2, v2, s[0:3], null offen
-; GISEL-NEXT:    s_wait_loadcnt 0x0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_byte_sgpr_or_imm_offset_divergent:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    buffer_load_i8 v2, v2, s[0:3], null offen
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-GISEL-LABEL: s_buffer_load_byte_sgpr_or_imm_offset_divergent:
+; PAL-GISEL:       ; %bb.0: ; %main_body
+; PAL-GISEL-NEXT:    buffer_load_b32 v2, v2, s[0:3], null offen
+; PAL-GISEL-NEXT:    s_wait_loadcnt 0x0
+; PAL-GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-GISEL-NEXT:    s_endpgm
 main_body:
   %ld = call i8 @llvm.amdgcn.s.buffer.load.i8(<4 x i32> %src, i32 %offset, i32 0)
   %sext = sext i8 %ld to i32
@@ -535,32 +518,22 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ubyte_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out) {
-; DAG-DEFAULT-LABEL: s_buffer_load_ubyte_imm_offset:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    buffer_load_u8 v2, off, s[0:3], null offset:4
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_ubyte_imm_offset:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_u8 s0, s[0:3], 0x4
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    s_and_b32 s0, s0, 0xff
-; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_ubyte_imm_offset:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_u8 s0, s[0:3], 0x4
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xff
-; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_ubyte_imm_offset:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    buffer_load_u8 v2, off, s[0:3], null offset:4
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_ubyte_imm_offset:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_u8 s0, s[0:3], 0x4
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    s_and_b32 s0, s0, 0xff
+; PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i8 @llvm.amdgcn.s.buffer.load.u8(<4 x i32> %src, i32 4, i32 0)
   %zext = zext i8 %ld to i32
@@ -569,33 +542,23 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ubyte_sgpr(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %offset) {
-; DAG-DEFAULT-LABEL: s_buffer_load_ubyte_sgpr:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
-; DAG-DEFAULT-NEXT:    buffer_load_u8 v2, v2, s[0:3], null offen
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_ubyte_sgpr:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x0
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    s_and_b32 s0, s0, 0xff
-; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_ubyte_sgpr:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x0
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xff
-; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_ubyte_sgpr:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DEFAULT-NEXT:    buffer_load_u8 v2, v2, s[0:3], null offen
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_ubyte_sgpr:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x0
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    s_and_b32 s0, s0, 0xff
+; PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i8 @llvm.amdgcn.s.buffer.load.u8(<4 x i32> %src, i32 %offset, i32 0)
   %zext = zext i8 %ld to i32
@@ -604,33 +567,23 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ubyte_sgpr_or_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %in) {
-; DAG-DEFAULT-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
-; DAG-DEFAULT-NEXT:    buffer_load_u8 v2, v2, s[0:3], null offen offset:100
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x64
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    s_and_b32 s0, s0, 0xff
-; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x64
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xff
-; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DEFAULT-NEXT:    buffer_load_u8 v2, v2, s[0:3], null offen offset:100
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_u8 s0, s[0:3], s4 offset:0x64
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    s_and_b32 s0, s0, 0xff
+; PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %off = add nuw nsw i32 %in, 100
   %ld = call i8 @llvm.amdgcn.s.buffer.load.u8(<4 x i32> %src, i32 %off, i32 0)
@@ -647,13 +600,20 @@ define amdgpu_ps void @s_buffer_load_ubyte_sgpr_or_imm_offset_divergent(<4 x i32
 ; DAG-NEXT:    global_store_b32 v[0:1], v2, off
 ; DAG-NEXT:    s_endpgm
 ;
-; GISEL-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset_divergent:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    buffer_load_b32 v2, v2, s[0:3], null offen
-; GISEL-NEXT:    s_wait_loadcnt 0x0
-; GISEL-NEXT:    v_and_b32_e32 v2, 0xff, v2
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset_divergent:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    buffer_load_u8 v2, v2, s[0:3], null offen
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-GISEL-LABEL: s_buffer_load_ubyte_sgpr_or_imm_offset_divergent:
+; PAL-GISEL:       ; %bb.0: ; %main_body
+; PAL-GISEL-NEXT:    buffer_load_b32 v2, v2, s[0:3], null offen
+; PAL-GISEL-NEXT:    s_wait_loadcnt 0x0
+; PAL-GISEL-NEXT:    v_and_b32_e32 v2, 0xff, v2
+; PAL-GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-GISEL-NEXT:    s_endpgm
 main_body:
   %ld = call i8 @llvm.amdgcn.s.buffer.load.u8(<4 x i32> %src, i32 %offset, i32 0)
   %zext = zext i8 %ld to i32
@@ -662,28 +622,20 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_short_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out) {
-; DAG-DEFAULT-LABEL: s_buffer_load_short_imm_offset:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    buffer_load_i16 v2, off, s[0:3], null offset:4
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_short_imm_offset:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_i16 s0, s[0:3], 0x4
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_short_imm_offset:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_i16 s0, s[0:3], 0x4
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_short_imm_offset:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    buffer_load_i16 v2, off, s[0:3], null offset:4
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_short_imm_offset:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_i16 s0, s[0:3], 0x4
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i16 @llvm.amdgcn.s.buffer.load.i16(<4 x i32> %src, i32 4, i32 0)
   %sext = sext i16 %ld to i32
@@ -692,29 +644,21 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_short_sgpr(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %offset) {
-; DAG-DEFAULT-LABEL: s_buffer_load_short_sgpr:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
-; DAG-DEFAULT-NEXT:    buffer_load_i16 v2, v2, s[0:3], null offen
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_short_sgpr:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x0
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_short_sgpr:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x0
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_short_sgpr:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DEFAULT-NEXT:    buffer_load_i16 v2, v2, s[0:3], null offen
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_short_sgpr:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x0
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i16 @llvm.amdgcn.s.buffer.load.i16(<4 x i32> %src, i32 %offset, i32 0)
   %sext = sext i16 %ld to i32
@@ -723,29 +667,21 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_short_sgpr_or_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %in) {
-; DAG-DEFAULT-LABEL: s_buffer_load_short_sgpr_or_imm_offset:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
-; DAG-DEFAULT-NEXT:    buffer_load_i16 v2, v2, s[0:3], null offen offset:100
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_short_sgpr_or_imm_offset:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x64
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_short_sgpr_or_imm_offset:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x64
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_short_sgpr_or_imm_offset:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DEFAULT-NEXT:    buffer_load_i16 v2, v2, s[0:3], null offen offset:100
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_short_sgpr_or_imm_offset:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_i16 s0, s[0:3], s4 offset:0x64
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %off = add nuw nsw i32 %in, 100
   %ld = call i16 @llvm.amdgcn.s.buffer.load.i16(<4 x i32> %src, i32 %off, i32 0)
@@ -762,12 +698,19 @@ define amdgpu_ps void @s_buffer_load_short_sgpr_or_imm_offset_divergent(<4 x i32
 ; DAG-NEXT:    global_store_b32 v[0:1], v2, off
 ; DAG-NEXT:    s_endpgm
 ;
-; GISEL-LABEL: s_buffer_load_short_sgpr_or_imm_offset_divergent:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    buffer_load_b32 v2, v2, s[0:3], null offen
-; GISEL-NEXT:    s_wait_loadcnt 0x0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_short_sgpr_or_imm_offset_divergent:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    buffer_load_i16 v2, v2, s[0:3], null offen
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-GISEL-LABEL: s_buffer_load_short_sgpr_or_imm_offset_divergent:
+; PAL-GISEL:       ; %bb.0: ; %main_body
+; PAL-GISEL-NEXT:    buffer_load_b32 v2, v2, s[0:3], null offen
+; PAL-GISEL-NEXT:    s_wait_loadcnt 0x0
+; PAL-GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-GISEL-NEXT:    s_endpgm
 main_body:
   %ld = call i16 @llvm.amdgcn.s.buffer.load.i16(<4 x i32> %src, i32 %offset, i32 0)
   %sext = sext i16 %ld to i32
@@ -776,32 +719,22 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ushort_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out) {
-; DAG-DEFAULT-LABEL: s_buffer_load_ushort_imm_offset:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    buffer_load_u16 v2, off, s[0:3], null offset:4
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_ushort_imm_offset:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_u16 s0, s[0:3], 0x4
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    s_and_b32 s0, s0, 0xffff
-; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_ushort_imm_offset:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_u16 s0, s[0:3], 0x4
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xffff
-; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_ushort_imm_offset:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    buffer_load_u16 v2, off, s[0:3], null offset:4
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_ushort_imm_offset:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_u16 s0, s[0:3], 0x4
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    s_and_b32 s0, s0, 0xffff
+; PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i16 @llvm.amdgcn.s.buffer.load.u16(<4 x i32> %src, i32 4, i32 0)
   %zext = zext i16 %ld to i32
@@ -810,33 +743,23 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ushort_sgpr(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %offset) {
-; DAG-DEFAULT-LABEL: s_buffer_load_ushort_sgpr:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
-; DAG-DEFAULT-NEXT:    buffer_load_u16 v2, v2, s[0:3], null offen
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_ushort_sgpr:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x0
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    s_and_b32 s0, s0, 0xffff
-; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_ushort_sgpr:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x0
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xffff
-; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_ushort_sgpr:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DEFAULT-NEXT:    buffer_load_u16 v2, v2, s[0:3], null offen
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_ushort_sgpr:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x0
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    s_and_b32 s0, s0, 0xffff
+; PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %ld = call i16 @llvm.amdgcn.s.buffer.load.u16(<4 x i32> %src, i32 %offset, i32 0)
   %zext = zext i16 %ld to i32
@@ -845,33 +768,23 @@ main_body:
 }
 
 define amdgpu_ps void @s_buffer_load_ushort_sgpr_or_imm_offset(<4 x i32> inreg %src, ptr addrspace(1) nocapture %out, i32 inreg %in) {
-; DAG-DEFAULT-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset:
-; DAG-DEFAULT:       ; %bb.0: ; %main_body
-; DAG-DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
-; DAG-DEFAULT-NEXT:    buffer_load_u16 v2, v2, s[0:3], null offen offset:100
-; DAG-DEFAULT-NEXT:    s_wait_loadcnt 0x0
-; DAG-DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-DEFAULT-NEXT:    s_endpgm
-;
-; GISEL-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x64
-; GISEL-NEXT:    s_wait_kmcnt 0x0
-; GISEL-NEXT:    s_and_b32 s0, s0, 0xffff
-; GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GISEL-NEXT:    v_mov_b32_e32 v2, s0
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
-;
-; DAG-PAL-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset:
-; DAG-PAL:       ; %bb.0: ; %main_body
-; DAG-PAL-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x64
-; DAG-PAL-NEXT:    s_wait_kmcnt 0x0
-; DAG-PAL-NEXT:    s_and_b32 s0, s0, 0xffff
-; DAG-PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; DAG-PAL-NEXT:    v_mov_b32_e32 v2, s0
-; DAG-PAL-NEXT:    global_store_b32 v[0:1], v2, off
-; DAG-PAL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    v_mov_b32_e32 v2, s4
+; DEFAULT-NEXT:    buffer_load_u16 v2, v2, s[0:3], null offen offset:100
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset:
+; PAL:       ; %bb.0: ; %main_body
+; PAL-NEXT:    s_buffer_load_u16 s0, s[0:3], s4 offset:0x64
+; PAL-NEXT:    s_wait_kmcnt 0x0
+; PAL-NEXT:    s_and_b32 s0, s0, 0xffff
+; PAL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; PAL-NEXT:    v_mov_b32_e32 v2, s0
+; PAL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-NEXT:    s_endpgm
 main_body:
   %off = add nuw nsw i32 %in, 100
   %ld = call i16 @llvm.amdgcn.s.buffer.load.u16(<4 x i32> %src, i32 %off, i32 0)
@@ -888,13 +801,20 @@ define amdgpu_ps void @s_buffer_load_ushort_sgpr_or_imm_offset_divergent(<4 x i3
 ; DAG-NEXT:    global_store_b32 v[0:1], v2, off
 ; DAG-NEXT:    s_endpgm
 ;
-; GISEL-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset_divergent:
-; GISEL:       ; %bb.0: ; %main_body
-; GISEL-NEXT:    buffer_load_b32 v2, v2, s[0:3], null offen
-; GISEL-NEXT:    s_wait_loadcnt 0x0
-; GISEL-NEXT:    v_and_b32_e32 v2, 0xffff, v2
-; GISEL-NEXT:    global_store_b32 v[0:1], v2, off
-; GISEL-NEXT:    s_endpgm
+; DEFAULT-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset_divergent:
+; DEFAULT:       ; %bb.0: ; %main_body
+; DEFAULT-NEXT:    buffer_load_u16 v2, v2, s[0:3], null offen
+; DEFAULT-NEXT:    s_wait_loadcnt 0x0
+; DEFAULT-NEXT:    global_store_b32 v[0:1], v2, off
+; DEFAULT-NEXT:    s_endpgm
+;
+; PAL-GISEL-LABEL: s_buffer_load_ushort_sgpr_or_imm_offset_divergent:
+; PAL-GISEL:       ; %bb.0: ; %main_body
+; PAL-GISEL-NEXT:    buffer_load_b32 v2, v2, s[0:3], null offen
+; PAL-GISEL-NEXT:    s_wait_loadcnt 0x0
+; PAL-GISEL-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; PAL-GISEL-NEXT:    global_store_b32 v[0:1], v2, off
+; PAL-GISEL-NEXT:    s_endpgm
 main_body:
   %ld = call i16 @llvm.amdgcn.s.buffer.load.u16(<4 x i32> %src, i32 %offset, i32 0)
   %zext = zext i16 %ld to i32



More information about the llvm-commits mailing list