[llvm] [AMDGPU] Handle nontemporal and amdgpu.last.use metadata in amdgpu-lower-buffer-fat-pointers (PR #120139)

Acim Maravic via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 16 12:26:51 PST 2024


https://github.com/Acim-Maravic created https://github.com/llvm/llvm-project/pull/120139

This patch makes amdgpu-lower-buffer-fat-pointers propagate the !nontemporal and !amdgpu.last.use metadata on loads and stores through ptr addrspace(7): when either annotation is present, the corresponding gfx12 temporal-hint bits (AMDGPU::CPol::TH_NT and AMDGPU::CPol::TH_LU) are ORed into the cache-policy (aux) operand of the raw.ptr.buffer intrinsics the pass emits.
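For illustration, a minimal before/after sketch of the lowering this enables (hand-written, not taken from the tests; %p, %rsrc, and %off are placeholder names, and the aux value 3 assumes the TH_LU encoding inferred from the test deltas below):

  ; before: last-use load through a buffer fat pointer
  %v = load i32, ptr addrspace(7) %p, !amdgpu.last.use !{}

  ; after amdgpu-lower-buffer-fat-pointers: the fat pointer is split into a
  ; resource/offset pair and the last (aux) operand carries the CPol bits
  %v = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) align 4 %rsrc, i32 %off, i32 0, i32 3)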

From 24ed04bd54c50591392babb5ac2f3eddc6842900 Mon Sep 17 00:00:00 2001
From: Acim Maravic <Acim.Maravic at amd.com>
Date: Mon, 16 Dec 2024 21:16:46 +0100
Subject: [PATCH] [AMDGPU] Handle nontemporal and amdgpu.last.use metadata in
 amdgpu-lower-buffer-fat-pointers

---
 .../AMDGPU/AMDGPULowerBufferFatPointers.cpp   |   4 +
 .../lower-buffer-fat-pointers-memops.ll       |  10 +-
 ...wer-buffer-fat-pointers-memory-metadata.ll | 383 ++++++++++++++++++
 3 files changed, 392 insertions(+), 5 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memory-metadata.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index c7cdd7a37282c7..14e814f64ad776 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -1088,6 +1088,10 @@ Value *SplitPtrStructs::handleMemoryInst(Instruction *I, Value *Arg, Value *Ptr,
     Aux |= (Aux & AMDGPU::CPol::GLC ? AMDGPU::CPol::DLC : 0);
   if (IsVolatile)
     Aux |= AMDGPU::CPol::VOLATILE;
+  if (I->hasMetadata("amdgpu.last.use"))
+    Aux |= AMDGPU::CPol::TH_LU;
+  if (I->hasMetadata("nontemporal"))
+    Aux |= AMDGPU::CPol::TH_NT;
   Args.push_back(IRB.getInt32(Aux));
 
   Intrinsic::ID IID = Intrinsic::not_intrinsic;
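Note for reviewers (not part of the patch): both checks OR into the same gfx12 temporal-hint field of Aux. Judging from the CHECK-line deltas below, TH_NT encodes as 1 and TH_LU as 3, so an access carrying both annotations computes 3 | 1 == 3 == TH_LU; this is why the combined buffer_last_use_and_nontemporal_load test at the end still selects th:TH_LOAD_LU.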
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
index 57028a0f9b14f3..9e72470e37db43 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
@@ -11,11 +11,11 @@ define void @loads(ptr addrspace(8) %buf) {
 ; CHECK-NEXT:    [[SCALAR:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 0)
 ; CHECK-NEXT:    [[VEC2:%.*]] = call <2 x float> @llvm.amdgcn.raw.ptr.buffer.load.v2f32(ptr addrspace(8) align 8 [[BUF]], i32 16, i32 0, i32 0)
 ; CHECK-NEXT:    [[VEC4:%.*]] = call <4 x float> @llvm.amdgcn.raw.ptr.buffer.load.v4f32(ptr addrspace(8) align 16 [[BUF]], i32 16, i32 0, i32 0)
-; CHECK-NEXT:    [[NONTEMPORAL:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 2), !nontemporal [[META0:![0-9]+]]
+; CHECK-NEXT:    [[NONTEMPORAL:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 3), !nontemporal [[META0:![0-9]+]]
 ; CHECK-NEXT:    [[INVARIANT:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 0), !invariant.load [[META1:![0-9]+]]
-; CHECK-NEXT:    [[NONTEMPORAL_INVARIANT:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 0), !invariant.load [[META1]], !nontemporal [[META0]]
+; CHECK-NEXT:    [[NONTEMPORAL_INVARIANT:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1), !invariant.load [[META1]], !nontemporal [[META0]]
 ; CHECK-NEXT:    [[VOLATILE:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483648)
-; CHECK-NEXT:    [[VOLATILE_NONTEMPORAL:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483646), !nontemporal [[META0]]
+; CHECK-NEXT:    [[VOLATILE_NONTEMPORAL:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483645), !nontemporal [[META0]]
 ; CHECK-NEXT:    fence syncscope("wavefront") release
 ; CHECK-NEXT:    [[ATOMIC:%.*]] = call float @llvm.amdgcn.raw.ptr.atomic.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483647)
 ; CHECK-NEXT:    fence syncscope("wavefront") acquire
@@ -50,9 +50,9 @@ define void @stores(ptr addrspace(8) %buf, float %f, <4 x float> %f4) {
 ; CHECK-SAME: (ptr addrspace(8) [[BUF:%.*]], float [[F:%.*]], <4 x float> [[F4:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float [[F]], ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 0)
 ; CHECK-NEXT:    call void @llvm.amdgcn.raw.ptr.buffer.store.v4f32(<4 x float> [[F4]], ptr addrspace(8) align 16 [[BUF]], i32 16, i32 0, i32 0)
-; CHECK-NEXT:    call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float [[F]], ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 2), !nontemporal [[META0]]
+; CHECK-NEXT:    call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float [[F]], ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 3), !nontemporal [[META0]]
 ; CHECK-NEXT:    call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float [[F]], ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483648)
-; CHECK-NEXT:    call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float [[F]], ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483646), !nontemporal [[META0]]
+; CHECK-NEXT:    call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float [[F]], ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483645), !nontemporal [[META0]]
 ; CHECK-NEXT:    fence syncscope("wavefront") release
 ; CHECK-NEXT:    call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float [[F]], ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483647)
 ; CHECK-NEXT:    fence syncscope("wavefront") acquire
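Decoding the updated aux immediates in these CHECK lines (a reviewer aid; SLC = 2 and VOLATILE = bit 31 are the usual CPol assignments, and TH_NT = 1 is inferred from the deltas rather than quoted from SIDefines.h):

  i32 3           = SLC (2, from the pre-existing nontemporal handling) | TH_NT (1, new)
  i32 1           = TH_NT alone (the invariant-load path did not set SLC)
  i32 -2147483645 = 0x80000003 = VOLATILE | SLC | TH_NT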
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memory-metadata.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memory-metadata.ll
new file mode 100644
index 00000000000000..37e6e98a365725
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memory-metadata.ll
@@ -0,0 +1,383 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1200 < %s | FileCheck --check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -O0 -mcpu=gfx1200 -mattr=+cumode < %s | FileCheck --check-prefix=GFX12 %s
+
+define amdgpu_kernel void @buffer_last_use_load_0(ptr addrspace(7) %in, ptr addrspace(7) %out) {
+; GFX12-LABEL: buffer_last_use_load_0:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_mov_b64 s[0:1], s[4:5]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_load_b64 s[4:5], s[0:1], 0x0
+; GFX12-NEXT:    s_load_b64 s[10:11], s[0:1], 0x8
+; GFX12-NEXT:    s_load_b32 s16, s[0:1], 0x10
+; GFX12-NEXT:    s_load_b64 s[6:7], s[0:1], 0x20
+; GFX12-NEXT:    s_load_b64 s[2:3], s[0:1], 0x28
+; GFX12-NEXT:    s_load_b32 s14, s[0:1], 0x30
+; GFX12-NEXT:    ; implicit-def: $sgpr0
+; GFX12-NEXT:    ; implicit-def: $sgpr1
+; GFX12-NEXT:    ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
+; GFX12-NEXT:    s_mov_b32 s17, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s1, 32
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_lshl_b64 s[12:13], s[10:11], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[8:9], s[4:5], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[8:9], s[8:9], s[12:13]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s13, s9
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr8 killed $sgpr8 killed $sgpr8_sgpr9
+; GFX12-NEXT:    s_lshr_b64 s[10:11], s[10:11], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[16:17], s[16:17], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[10:11], s[10:11], s[16:17]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s0, s11
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s12, s10
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9_sgpr10_sgpr11
+; GFX12-NEXT:    s_mov_b32 s9, s13
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s10, s12
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s11, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s5, s4
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; implicit-def: $sgpr0
+; GFX12-NEXT:    ; implicit-def: $sgpr4
+; GFX12-NEXT:    ; kill: def $sgpr14 killed $sgpr14 def $sgpr14_sgpr15
+; GFX12-NEXT:    s_mov_b32 s15, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[16:17], s[2:3], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[12:13], s[6:7], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[16:17], s[12:13], s[16:17]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s13, s17
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s0, s16
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[2:3], s[2:3], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[14:15], s[14:15], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[2:3], s[2:3], s[14:15]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s4, s3
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s12, s2
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1_sgpr2_sgpr3
+; GFX12-NEXT:    s_mov_b32 s1, s13
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s2, s12
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s3, s4
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s4, s6
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_mov_b32_e32 v0, s5
+; GFX12-NEXT:    buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_LU
+; GFX12-NEXT:    v_mov_b32_e32 v1, s4
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    buffer_store_b32 v0, v1, s[0:3], null offen
+; GFX12-NEXT:    s_endpgm
+entry:
+  %val = load i32, ptr addrspace(7) %in, !amdgpu.last.use !{}
+  store i32 %val, ptr addrspace(7) %out
+  ret void
+}
+
+define amdgpu_kernel void @buffer_last_use_load_1(ptr addrspace(7) %in, ptr addrspace(7) %out) {
+; GFX12-LABEL: buffer_last_use_load_1:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_mov_b64 s[0:1], s[4:5]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_load_b64 s[6:7], s[0:1], 0x0
+; GFX12-NEXT:    s_load_b64 s[10:11], s[0:1], 0x8
+; GFX12-NEXT:    s_load_b32 s16, s[0:1], 0x10
+; GFX12-NEXT:    s_load_b64 s[4:5], s[0:1], 0x20
+; GFX12-NEXT:    s_load_b64 s[2:3], s[0:1], 0x28
+; GFX12-NEXT:    s_load_b32 s14, s[0:1], 0x30
+; GFX12-NEXT:    ; implicit-def: $sgpr0
+; GFX12-NEXT:    ; implicit-def: $sgpr1
+; GFX12-NEXT:    ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
+; GFX12-NEXT:    s_mov_b32 s17, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s1, 32
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_lshl_b64 s[12:13], s[10:11], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[8:9], s[6:7], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[8:9], s[8:9], s[12:13]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s13, s9
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr8 killed $sgpr8 killed $sgpr8_sgpr9
+; GFX12-NEXT:    s_lshr_b64 s[10:11], s[10:11], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[16:17], s[16:17], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[10:11], s[10:11], s[16:17]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s0, s11
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s12, s10
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9_sgpr10_sgpr11
+; GFX12-NEXT:    s_mov_b32 s9, s13
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s10, s12
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s11, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr6 killed $sgpr6 killed $sgpr6_sgpr7
+; GFX12-NEXT:    ; implicit-def: $sgpr0
+; GFX12-NEXT:    ; implicit-def: $sgpr7
+; GFX12-NEXT:    ; kill: def $sgpr14 killed $sgpr14 def $sgpr14_sgpr15
+; GFX12-NEXT:    s_mov_b32 s15, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[16:17], s[2:3], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[12:13], s[4:5], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[16:17], s[12:13], s[16:17]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s13, s17
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s0, s16
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[2:3], s[2:3], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[14:15], s[14:15], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[2:3], s[2:3], s[14:15]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s7, s3
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s12, s2
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1_sgpr2_sgpr3
+; GFX12-NEXT:    s_mov_b32 s1, s13
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s2, s12
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s3, s7
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
+; GFX12-NEXT:    s_mov_b32 s5, 0x3ff
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_and_b32_e64 v0, v0, s5
+; GFX12-NEXT:    s_mov_b32 s5, 2
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_lshl_add_u32 v0, v0, s5, s6
+; GFX12-NEXT:    buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_LU
+; GFX12-NEXT:    v_mov_b32_e32 v1, s4
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    buffer_store_b32 v0, v1, s[0:3], null offen
+; GFX12-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %val.gep = getelementptr inbounds i32, ptr addrspace(7) %in, i32 %tid
+  %val = load i32, ptr addrspace(7) %val.gep, align 4, !amdgpu.last.use !{}
+  store i32 %val, ptr addrspace(7) %out
+  ret void
+}
+
+define amdgpu_kernel void @buffer_last_use_and_volatile_load(ptr addrspace(7) %in, ptr addrspace(7) %out) {
+; GFX12-LABEL: buffer_last_use_and_volatile_load:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_mov_b64 s[0:1], s[4:5]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_load_b64 s[4:5], s[0:1], 0x0
+; GFX12-NEXT:    s_load_b64 s[10:11], s[0:1], 0x8
+; GFX12-NEXT:    s_load_b32 s16, s[0:1], 0x10
+; GFX12-NEXT:    s_load_b64 s[6:7], s[0:1], 0x20
+; GFX12-NEXT:    s_load_b64 s[2:3], s[0:1], 0x28
+; GFX12-NEXT:    s_load_b32 s14, s[0:1], 0x30
+; GFX12-NEXT:    ; implicit-def: $sgpr0
+; GFX12-NEXT:    ; implicit-def: $sgpr1
+; GFX12-NEXT:    ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
+; GFX12-NEXT:    s_mov_b32 s17, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s1, 32
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_lshl_b64 s[12:13], s[10:11], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[8:9], s[4:5], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[8:9], s[8:9], s[12:13]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s13, s9
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr8 killed $sgpr8 killed $sgpr8_sgpr9
+; GFX12-NEXT:    s_lshr_b64 s[10:11], s[10:11], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[16:17], s[16:17], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[10:11], s[10:11], s[16:17]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s0, s11
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s12, s10
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9_sgpr10_sgpr11
+; GFX12-NEXT:    s_mov_b32 s9, s13
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s10, s12
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s11, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s5, s4
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; implicit-def: $sgpr0
+; GFX12-NEXT:    ; implicit-def: $sgpr4
+; GFX12-NEXT:    ; kill: def $sgpr14 killed $sgpr14 def $sgpr14_sgpr15
+; GFX12-NEXT:    s_mov_b32 s15, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[16:17], s[2:3], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[12:13], s[6:7], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[16:17], s[12:13], s[16:17]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s13, s17
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s0, s16
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[2:3], s[2:3], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[14:15], s[14:15], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[2:3], s[2:3], s[14:15]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s4, s3
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s12, s2
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1_sgpr2_sgpr3
+; GFX12-NEXT:    s_mov_b32 s1, s13
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s2, s12
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s3, s4
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s4, s6
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_mov_b32_e32 v0, s5
+; GFX12-NEXT:    buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_BYPASS scope:SCOPE_SYS
+; GFX12-NEXT:    v_mov_b32_e32 v1, s4
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    buffer_store_b32 v0, v1, s[0:3], null offen
+; GFX12-NEXT:    s_endpgm
+entry:
+  %val = load volatile i32, ptr addrspace(7) %in, !amdgpu.last.use !{}
+  store i32 %val, ptr addrspace(7) %out
+  ret void
+}
+
+define amdgpu_kernel void @buffer_last_use_and_nontemporal_load(ptr addrspace(7) %in, ptr addrspace(7) %out) {
+; GFX12-LABEL: buffer_last_use_and_nontemporal_load:
+; GFX12:       ; %bb.0: ; %entry
+; GFX12-NEXT:    s_mov_b64 s[0:1], s[4:5]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_load_b64 s[4:5], s[0:1], 0x0
+; GFX12-NEXT:    s_load_b64 s[10:11], s[0:1], 0x8
+; GFX12-NEXT:    s_load_b32 s16, s[0:1], 0x10
+; GFX12-NEXT:    s_load_b64 s[6:7], s[0:1], 0x20
+; GFX12-NEXT:    s_load_b64 s[2:3], s[0:1], 0x28
+; GFX12-NEXT:    s_load_b32 s14, s[0:1], 0x30
+; GFX12-NEXT:    ; implicit-def: $sgpr0
+; GFX12-NEXT:    ; implicit-def: $sgpr1
+; GFX12-NEXT:    ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
+; GFX12-NEXT:    s_mov_b32 s17, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s1, 32
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_lshl_b64 s[12:13], s[10:11], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[8:9], s[4:5], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[8:9], s[8:9], s[12:13]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s13, s9
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr8 killed $sgpr8 killed $sgpr8_sgpr9
+; GFX12-NEXT:    s_lshr_b64 s[10:11], s[10:11], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[16:17], s[16:17], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[10:11], s[10:11], s[16:17]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s0, s11
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s12, s10
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9_sgpr10_sgpr11
+; GFX12-NEXT:    s_mov_b32 s9, s13
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s10, s12
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s11, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s5, s4
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; implicit-def: $sgpr0
+; GFX12-NEXT:    ; implicit-def: $sgpr4
+; GFX12-NEXT:    ; kill: def $sgpr14 killed $sgpr14 def $sgpr14_sgpr15
+; GFX12-NEXT:    s_mov_b32 s15, s0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[16:17], s[2:3], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[12:13], s[6:7], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[16:17], s[12:13], s[16:17]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s13, s17
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s0, s16
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshr_b64 s[2:3], s[2:3], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_lshl_b64 s[14:15], s[14:15], s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_or_b64 s[2:3], s[2:3], s[14:15]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s4, s3
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s12, s2
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1_sgpr2_sgpr3
+; GFX12-NEXT:    s_mov_b32 s1, s13
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s2, s12
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s3, s4
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_mov_b32 s4, s6
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_mov_b32_e32 v0, s5
+; GFX12-NEXT:    buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_LU
+; GFX12-NEXT:    v_mov_b32_e32 v1, s4
+; GFX12-NEXT:    s_wait_loadcnt 0x0
+; GFX12-NEXT:    buffer_store_b32 v0, v1, s[0:3], null offen
+; GFX12-NEXT:    s_endpgm
+entry:
+  %val = load i32, ptr addrspace(7) %in, !amdgpu.last.use !{}, !nontemporal !0
+  store i32 %val, ptr addrspace(7) %out
+  ret void
+}
+
+!0 = !{i32 1}
+declare i32 @llvm.amdgcn.workitem.id.x()


