[llvm] [NVPTX] Add intrinsics for prefetch.* (PR #125887)
Abhilash Majumder via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 5 09:16:46 PST 2025
https://github.com/abhilash1910 created https://github.com/llvm/llvm-project/pull/125887
[NVPTX] Add Prefetch intrinsics
This PR adds prefetch intrinsics with the relevant eviction priorities.
* Lit tests are added as part of prefetch.ll
* The generated PTX is verified with a 12.3 ptxas executable.
* Added docs for these intrinsics in NVPTXUsage.rst.
For more information, refer to the PTX ISA `<https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-prefetch-prefetchu>`_.
>From 77420f63875ef876a3c1f35896fa0e9065960f92 Mon Sep 17 00:00:00 2001
From: abmajumder <abmajumder at nvidia.com>
Date: Wed, 5 Feb 2025 22:37:33 +0530
Subject: [PATCH] add prefetch intrinsics
---
llvm/docs/NVPTXUsage.rst | 41 ++++++++++++
llvm/include/llvm/IR/IntrinsicsNVVM.td | 25 ++++++++
llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 25 ++++++++
llvm/test/CodeGen/NVPTX/prefetch.ll | 82 ++++++++++++++++++++++++
4 files changed, 173 insertions(+)
create mode 100644 llvm/test/CodeGen/NVPTX/prefetch.ll
diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst
index dec6ad4e541152..31602a8c498115 100644
--- a/llvm/docs/NVPTXUsage.rst
+++ b/llvm/docs/NVPTXUsage.rst
@@ -581,6 +581,47 @@ prefetched in terms of bytes and it must be a multiple of 16.
For more information, refer PTX ISA
`<https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-cp-async-bulk-prefetch>`_.
+'``llvm.nvvm.prefetch.*``'
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+.. code-block:: llvm
+
+ declare void @llvm.nvvm.prefetch.local.L1.evictnormal(ptr addrspace(5) %localPtr)
+ declare void @llvm.nvvm.prefetch.local.L2.evictnormal(ptr addrspace(5) %localPtr)
+
+ declare void @llvm.nvvm.prefetch.global.L1.evictnormal(ptr addrspace(1) %globalPtr)
+ declare void @llvm.nvvm.prefetch.global.L2.evictnormal(ptr addrspace(1) %globalPtr)
+ declare void @llvm.nvvm.prefetch.global.L1.evictlast(ptr addrspace(1) %globalPtr)
+ declare void @llvm.nvvm.prefetch.global.L2.evictlast(ptr addrspace(1) %globalPtr)
+
+ declare void @llvm.nvvm.prefetch.L1.evictnormal(ptr %ptr)
+ declare void @llvm.nvvm.prefetch.L2.evictnormal(ptr %ptr)
+
+ declare void @llvm.nvvm.prefetchu.L1.evictnormal(ptr %ptr)
+
+Overview:
+"""""""""
+
+The '``@llvm.nvvm.prefetch.*``' and '``@llvm.nvvm.prefetchu.*``' intrinsics
+correspond to the '``prefetch.*``' and '``prefetchu.*``' families of PTX instructions.
+The '``prefetch.*``' instructions bring the cache line containing the
+specified address in the global or local memory address space into the
+specified cache level (L1 or L2). The '``prefetchu.*``' instruction brings the cache line
+containing the specified generic address into the specified uniform cache level.
+If no address space is specified, the address is assumed to be a generic address. The
+intrinsics use an eviction priority, which is specified by the '``.level::eviction_priority``' modifier.
+
+
+* A prefetch to a shared memory location performs no operation.
+* A prefetch into the uniform cache requires a generic address,
+ and no operation occurs if the address maps to a const, local, or shared memory location.
+
+For more information, refer PTX ISA
+`<https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-prefetch-prefetchu>`_.
+
'``llvm.nvvm.cp.async.bulk.tensor.g2s.tile.[1-5]d``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index abbe25bf0040a6..e9504b0ef5a2f2 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -48,6 +48,7 @@
def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
+def llvm_local_ptr_ty : LLVMQualPointerType<5>; // (local)ptr
def llvm_tmem_ptr_ty : LLVMQualPointerType<6>; // (tensor memory)ptr
//
@@ -4999,6 +5000,30 @@ foreach dim = [1, 2, 3, 4, 5] in {
}
}
+// Intrinsics for Prefetch and Prefetchu
+foreach addr = ["global", "local", ""] in {
+ foreach evict = !if(!eq(addr, "global"),
+ ["evictlast", "evictnormal"],
+ ["evictnormal"]) in {
+ foreach level = ["L1", "L2"] in {
+ def int_nvvm_prefetch_ # !if(!eq(addr, ""), "", addr # "_")
+ # level # "_" # evict : Intrinsic<[],
+ !cond(
+ !eq(addr, "global") : [llvm_global_ptr_ty],
+ !eq(addr, "local"): [llvm_local_ptr_ty],
+ !eq(addr, ""): [llvm_ptr_ty]),
+ [IntrArgMemOnly,ReadOnly<ArgIndex<0>>,
+ NoCapture<ArgIndex<0>>],
+ "llvm.nvvm.prefetch." # !if(!eq(addr, ""), "", addr # ".")# level # "." # evict>;
+ }
+ }
+}
+
+def int_nvvm_prefetchu_L1_evictnormal : Intrinsic<[], [llvm_ptr_ty],
+ [IntrArgMemOnly, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
+ "llvm.nvvm.prefetchu.L1.evictnormal">;
+
+
// Intrinsics for Bulk Copy using TMA (non-tensor)
// From Global to Shared Cluster
def int_nvvm_cp_async_bulk_global_to_shared_cluster
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index a0d00e4aac560a..69f9cb3ff0c87b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -740,6 +740,31 @@ foreach dim = [1, 2, 3, 4, 5] in {
}
}
+//Prefetch and Prefetchu
+class PREFETCH_INTRS<string InstName, Intrinsic Intrin> :
+ NVPTXInst<(outs), (ins Int64Regs:$addr),
+ !strconcat(InstName, " [$addr];"),
+ [(Intrin i64:$addr)]>,
+ Requires<[hasPTX<80>, hasSM<90>]>;
+
+
+// Only global supports evictlast and evictnormal.
+// Other variants (local and default) only support evictnormal
+foreach addr = ["global", "local", ""] in {
+ foreach evict = !if(!eq(addr, "global"),
+ ["evictlast", "evictnormal"],
+ ["evictnormal"]) in {
+ foreach level = ["L1", "L2"] in {
+ def PREFETCH_# addr # level # "_" # evict : PREFETCH_INTRS<
+ "prefetch." # !if(!eq(addr, ""), "", addr # ".") # level # "." # evict,
+ !cast<Intrinsic>
+ ("int_nvvm_prefetch_"# !if(!eq(addr, ""), "", addr # "_") # level # "_" # evict)>;
+ }
+ }
+}
+
+def PREFETCHU_L1_EVICTNORMAL : PREFETCH_INTRS<"prefetchu.L1.evictnormal", !cast<Intrinsic>("int_nvvm_prefetchu_L1_evictnormal")>;
+
//-----------------------------------
// MBarrier Functions
//-----------------------------------
diff --git a/llvm/test/CodeGen/NVPTX/prefetch.ll b/llvm/test/CodeGen/NVPTX/prefetch.ll
new file mode 100644
index 00000000000000..0576a737d69a24
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/prefetch.ll
@@ -0,0 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80| FileCheck --check-prefixes=CHECK-PTX64 %s
+; RUN: %if ptxas-12.3 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80| %ptxas-verify -arch=sm_90 %}
+; RUN: %if ptxas-12.3 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 --nvptx-short-ptr| %ptxas-verify -arch=sm_90 %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+declare void @llvm.nvvm.prefetch.local.L1.evictnormal(ptr addrspace(5) %localPtr)
+declare void @llvm.nvvm.prefetch.local.L2.evictnormal(ptr addrspace(5) %localPtr)
+
+declare void @llvm.nvvm.prefetch.global.L1.evictnormal(ptr addrspace(1) %globalPtr)
+declare void @llvm.nvvm.prefetch.global.L2.evictnormal(ptr addrspace(1) %globalPtr)
+declare void @llvm.nvvm.prefetch.global.L1.evictlast(ptr addrspace(1) %globalPtr)
+declare void @llvm.nvvm.prefetch.global.L2.evictlast(ptr addrspace(1) %globalPtr)
+
+declare void @llvm.nvvm.prefetch.L1.evictnormal(ptr %ptr)
+declare void @llvm.nvvm.prefetch.L2.evictnormal(ptr %ptr)
+
+declare void @llvm.nvvm.prefetchu.L1.evictnormal(ptr %ptr)
+
+define void @prefetch_local(ptr addrspace(5) %localPtr) {
+; CHECK-PTX64-LABEL: prefetch_local(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.u64 %rd1, [prefetch_local_param_0];
+; CHECK-PTX64-NEXT: prefetch.local.L1.evictnormal [%rd1];
+; CHECK-PTX64-NEXT: prefetch.local.L2.evictnormal [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.local.L1.evictnormal(ptr addrspace(5) %localPtr)
+ tail call void @llvm.nvvm.prefetch.local.L2.evictnormal(ptr addrspace(5) %localPtr)
+ ret void
+}
+
+define void @prefetch_global(ptr addrspace(1) %globalPtr) {
+; CHECK-PTX64-LABEL: prefetch_global(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.u64 %rd1, [prefetch_global_param_0];
+; CHECK-PTX64-NEXT: prefetch.global.L1.evictnormal [%rd1];
+; CHECK-PTX64-NEXT: prefetch.global.L2.evictnormal [%rd1];
+; CHECK-PTX64-NEXT: prefetch.global.L1.evictlast [%rd1];
+; CHECK-PTX64-NEXT: prefetch.global.L2.evictlast [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.global.L1.evictnormal(ptr addrspace(1) %globalPtr)
+ tail call void @llvm.nvvm.prefetch.global.L2.evictnormal(ptr addrspace(1) %globalPtr)
+ tail call void @llvm.nvvm.prefetch.global.L1.evictlast(ptr addrspace(1) %globalPtr)
+ tail call void @llvm.nvvm.prefetch.global.L2.evictlast(ptr addrspace(1) %globalPtr)
+ ret void
+}
+
+
+define void @prefetch_(ptr %ptr) {
+; CHECK-PTX64-LABEL: prefetch_(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.u64 %rd1, [prefetch__param_0];
+; CHECK-PTX64-NEXT: prefetch.L1.evictnormal [%rd1];
+; CHECK-PTX64-NEXT: prefetch.L2.evictnormal [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.L1.evictnormal(ptr %ptr)
+ tail call void @llvm.nvvm.prefetch.L2.evictnormal(ptr %ptr)
+ ret void
+}
+
+define void @prefetchu_l1(ptr %ptr) {
+; CHECK-PTX64-LABEL: prefetchu_l1(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.u64 %rd1, [prefetchu_l1_param_0];
+; CHECK-PTX64-NEXT: prefetchu.L1.evictnormal [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetchu.L1.evictnormal(ptr %ptr)
+ ret void
+}
\ No newline at end of file
More information about the llvm-commits
mailing list