[llvm] [NVPTX] Add intrinsics for prefetch.* (PR #125887)

Abhilash Majumder via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 6 02:37:32 PST 2025


https://github.com/abhilash1910 updated https://github.com/llvm/llvm-project/pull/125887

>From 77420f63875ef876a3c1f35896fa0e9065960f92 Mon Sep 17 00:00:00 2001
From: abmajumder <abmajumder at nvidia.com>
Date: Wed, 5 Feb 2025 22:37:33 +0530
Subject: [PATCH 1/2] add prefetch intrinsics

---
 llvm/docs/NVPTXUsage.rst                 | 41 ++++++++++++
 llvm/include/llvm/IR/IntrinsicsNVVM.td   | 25 ++++++++
 llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 25 ++++++++
 llvm/test/CodeGen/NVPTX/prefetch.ll      | 82 ++++++++++++++++++++++++
 4 files changed, 173 insertions(+)
 create mode 100644 llvm/test/CodeGen/NVPTX/prefetch.ll

diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst
index dec6ad4e541152a..31602a8c4981157 100644
--- a/llvm/docs/NVPTXUsage.rst
+++ b/llvm/docs/NVPTXUsage.rst
@@ -581,6 +581,47 @@ prefetched in terms of bytes and it must be a multiple of 16.
 For more information, refer PTX ISA
 `<https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-cp-async-bulk-prefetch>`_.
 
+'``llvm.nvvm.prefetch.*``'
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+.. code-block:: llvm
+
+  declare void  @llvm.nvvm.prefetch.local.L1.evictnormal(ptr addrspace(5) %localPtr)
+  declare void  @llvm.nvvm.prefetch.local.L2.evictnormal(ptr addrspace(5) %localPtr)
+  
+  declare void  @llvm.nvvm.prefetch.global.L1.evictnormal(ptr addrspace(1) %globalPtr)
+  declare void  @llvm.nvvm.prefetch.global.L2.evictnormal(ptr addrspace(1) %globalPtr)
+  declare void  @llvm.nvvm.prefetch.global.L1.evictlast(ptr addrspace(1) %globalPtr)
+  declare void  @llvm.nvvm.prefetch.global.L2.evictlast(ptr addrspace(1) %globalPtr)
+  
+  declare void  @llvm.nvvm.prefetch.L1.evictnormal(ptr %ptr)
+  declare void  @llvm.nvvm.prefetch.L2.evictnormal(ptr %ptr)
+  
+  declare void  @llvm.nvvm.prefetchu.L1.evictnormal(ptr %ptr)
+
+Overview:
+"""""""""
+
+The '``@llvm.nvvm.prefetch.*``' and '``@llvm.nvvm.prefetchu.*``' intrinsics
+correspond to the '``prefetch.*``' and '``prefetchu.*``' families of PTX instructions.
+The '``prefetch.*``' instructions bring the cache line containing the
+specified address in the global or local memory address space into the
+specified cache level (L1 or L2). The '``prefetchu.*``' instruction brings the cache line
+containing the specified generic address into the specified uniform cache level.
+If no address space is specified, the address is assumed to be generic. The intrinsics
+use an eviction priority, which is specified by the '``.level::eviction_priority``' modifier.
+
+
+* A prefetch to a shared memory location performs no operation.
+* A prefetch into the uniform cache requires a generic address, 
+  and no operation occurs if the address maps to a const, local, or shared memory location.
+
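+For example, the following IR is a minimal sketch (the function and value
+names are illustrative) that prefetches a generic address into the L1 cache
+and a global address into the L2 cache with evict-last priority:
+
+.. code-block:: llvm
+
+  declare void @llvm.nvvm.prefetch.L1.evictnormal(ptr)
+  declare void @llvm.nvvm.prefetch.global.L2.evictlast(ptr addrspace(1))
+
+  define void @prefetch_example(ptr %p, ptr addrspace(1) %g) {
+    ; Prefetch the line containing the generic address %p into the L1 cache.
+    call void @llvm.nvvm.prefetch.L1.evictnormal(ptr %p)
+    ; Prefetch the line containing the global address %g into the L2 cache
+    ; with evict-last eviction priority.
+    call void @llvm.nvvm.prefetch.global.L2.evictlast(ptr addrspace(1) %g)
+    ret void
+  }
+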
+For more information, refer PTX ISA
+`<https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-prefetch-prefetchu>`_.
+
 '``llvm.nvvm.cp.async.bulk.tensor.g2s.tile.[1-5]d``'
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index abbe25bf0040a6f..e9504b0ef5a2f20 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -48,6 +48,7 @@
 
 def llvm_global_ptr_ty  : LLVMQualPointerType<1>;  // (global)ptr
 def llvm_shared_ptr_ty  : LLVMQualPointerType<3>;  // (shared)ptr
+def llvm_local_ptr_ty   : LLVMQualPointerType<5>;  // (local)ptr
 def llvm_tmem_ptr_ty    : LLVMQualPointerType<6>;  // (tensor memory)ptr
 
 //
@@ -4999,6 +5000,30 @@ foreach dim = [1, 2, 3, 4, 5] in {
   }
 }
 
+// Intrinsics for Prefetch and Prefetchu
+foreach addr = ["global", "local", ""] in {
+  foreach evict = !if(!eq(addr, "global"),
+                  ["evictlast", "evictnormal"],
+                  ["evictnormal"]) in {
+    foreach level = ["L1", "L2"] in {
+      def int_nvvm_prefetch_ # !if(!eq(addr, ""), "", addr # "_") 
+          # level # "_" # evict : Intrinsic<[], 
+                                            !cond(
+                                            !eq(addr, "global") : [llvm_global_ptr_ty],
+                                            !eq(addr, "local"): [llvm_local_ptr_ty],
+                                            !eq(addr, ""): [llvm_ptr_ty]),
+                                            [IntrArgMemOnly,ReadOnly<ArgIndex<0>>,
+                                             NoCapture<ArgIndex<0>>],
+                                            "llvm.nvvm.prefetch." # !if(!eq(addr, ""), "", addr                                                                                                     # ".")# level # "." # evict>;   
+    }
+  }
+}
+
+def int_nvvm_prefetchu_L1_evictnormal : Intrinsic<[], [llvm_ptr_ty],
+  [IntrArgMemOnly, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
+  "llvm.nvvm.prefetchu.L1.evictnormal">;
+
+
 // Intrinsics for Bulk Copy using TMA (non-tensor)
 // From Global to Shared Cluster
 def int_nvvm_cp_async_bulk_global_to_shared_cluster
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index a0d00e4aac560a5..69f9cb3ff0c87b8 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -740,6 +740,31 @@ foreach dim = [1, 2, 3, 4, 5] in {
   }
 }
 
+//Prefetch and Prefetchu 
+class PREFETCH_INTRS<string InstName, Intrinsic Intrin> :
+          NVPTXInst<(outs), (ins Int64Regs:$addr),
+            !strconcat(InstName, " [$addr];"),
+            [(Intrin i64:$addr)]>,
+          Requires<[hasPTX<80>, hasSM<90>]>;
+   
+
+// Only global supports evictlast and evictnormal.
+// Other variants (local and default) only support evictnormal
+foreach addr = ["global", "local", ""] in {
+  foreach evict = !if(!eq(addr, "global"),
+                          ["evictlast", "evictnormal"],
+                          ["evictnormal"]) in {
+    foreach level = ["L1", "L2"] in {
+      def PREFETCH_# addr # level # "_" # evict : PREFETCH_INTRS<
+                                                 "prefetch." # !if(!eq(addr, ""), "", addr # ".") # level # "." # evict,
+                                                 !cast<Intrinsic>
+                                                 ("int_nvvm_prefetch_"# !if(!eq(addr, ""), "", addr # "_") # level # "_" # evict)>;
+    }
+  }
+}
+
+def PREFETCHU_L1_EVICTNORMAL : PREFETCH_INTRS<"prefetchu.L1.evictnormal", !cast<Intrinsic>("int_nvvm_prefetchu_L1_evictnormal")>;
+
 //-----------------------------------
 // MBarrier Functions
 //-----------------------------------
diff --git a/llvm/test/CodeGen/NVPTX/prefetch.ll b/llvm/test/CodeGen/NVPTX/prefetch.ll
new file mode 100644
index 000000000000000..0576a737d69a242
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/prefetch.ll
@@ -0,0 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80| FileCheck --check-prefixes=CHECK-PTX64 %s
+; RUN: %if ptxas-12.3 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80| %ptxas-verify -arch=sm_90 %}
+; RUN: %if ptxas-12.3 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 --nvptx-short-ptr| %ptxas-verify -arch=sm_90 %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+declare void  @llvm.nvvm.prefetch.local.L1.evictnormal(ptr addrspace(5) %localPtr)
+declare void  @llvm.nvvm.prefetch.local.L2.evictnormal(ptr addrspace(5) %localPtr)
+
+declare void  @llvm.nvvm.prefetch.global.L1.evictnormal(ptr addrspace(1) %globalPtr)
+declare void  @llvm.nvvm.prefetch.global.L2.evictnormal(ptr addrspace(1) %globalPtr)
+declare void  @llvm.nvvm.prefetch.global.L1.evictlast(ptr addrspace(1) %globalPtr)
+declare void  @llvm.nvvm.prefetch.global.L2.evictlast(ptr addrspace(1) %globalPtr)
+
+declare void  @llvm.nvvm.prefetch.L1.evictnormal(ptr %ptr)
+declare void  @llvm.nvvm.prefetch.L2.evictnormal(ptr %ptr)
+
+declare void  @llvm.nvvm.prefetchu.L1.evictnormal(ptr %ptr)
+
+define void @prefetch_local(ptr addrspace(5) %localPtr) {
+; CHECK-PTX64-LABEL: prefetch_local(
+; CHECK-PTX64:       {
+; CHECK-PTX64-NEXT:    .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT:  // %bb.0:
+; CHECK-PTX64-NEXT:    ld.param.u64 %rd1, [prefetch_local_param_0];
+; CHECK-PTX64-NEXT:    prefetch.local.L1.evictnormal [%rd1];
+; CHECK-PTX64-NEXT:    prefetch.local.L2.evictnormal [%rd1];
+; CHECK-PTX64-NEXT:    ret;
+  tail call void @llvm.nvvm.prefetch.local.L1.evictnormal(ptr addrspace(5) %localPtr)
+  tail call void @llvm.nvvm.prefetch.local.L2.evictnormal(ptr addrspace(5) %localPtr)
+  ret void
+}
+
+define void @prefetch_global(ptr addrspace(1) %globalPtr) {
+; CHECK-PTX64-LABEL: prefetch_global(
+; CHECK-PTX64:       {
+; CHECK-PTX64-NEXT:    .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT:  // %bb.0:
+; CHECK-PTX64-NEXT:    ld.param.u64 %rd1, [prefetch_global_param_0];
+; CHECK-PTX64-NEXT:    prefetch.global.L1.evictnormal [%rd1];
+; CHECK-PTX64-NEXT:    prefetch.global.L2.evictnormal [%rd1];
+; CHECK-PTX64-NEXT:    prefetch.global.L1.evictlast [%rd1];
+; CHECK-PTX64-NEXT:    prefetch.global.L2.evictlast [%rd1];
+; CHECK-PTX64-NEXT:    ret;
+  tail call void @llvm.nvvm.prefetch.global.L1.evictnormal(ptr addrspace(1) %globalPtr)
+  tail call void @llvm.nvvm.prefetch.global.L2.evictnormal(ptr addrspace(1) %globalPtr)
+  tail call void @llvm.nvvm.prefetch.global.L1.evictlast(ptr addrspace(1) %globalPtr)
+  tail call void @llvm.nvvm.prefetch.global.L2.evictlast(ptr addrspace(1) %globalPtr)
+  ret void
+}
+
+
+define void @prefetch_(ptr %ptr) {
+; CHECK-PTX64-LABEL: prefetch_(
+; CHECK-PTX64:       {
+; CHECK-PTX64-NEXT:    .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT:  // %bb.0:
+; CHECK-PTX64-NEXT:    ld.param.u64 %rd1, [prefetch__param_0];
+; CHECK-PTX64-NEXT:    prefetch.L1.evictnormal [%rd1];
+; CHECK-PTX64-NEXT:    prefetch.L2.evictnormal [%rd1];
+; CHECK-PTX64-NEXT:    ret;
+  tail call void @llvm.nvvm.prefetch.L1.evictnormal(ptr %ptr)
+  tail call void @llvm.nvvm.prefetch.L2.evictnormal(ptr %ptr)
+  ret void
+}
+
+define void @prefetchu_l1(ptr %ptr) {
+; CHECK-PTX64-LABEL: prefetchu_l1(
+; CHECK-PTX64:       {
+; CHECK-PTX64-NEXT:    .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT:  // %bb.0:
+; CHECK-PTX64-NEXT:    ld.param.u64 %rd1, [prefetchu_l1_param_0];
+; CHECK-PTX64-NEXT:    prefetchu.L1.evictnormal [%rd1];
+; CHECK-PTX64-NEXT:    ret;
+  tail call void @llvm.nvvm.prefetchu.L1.evictnormal(ptr %ptr)
+  ret void
+}
\ No newline at end of file

>From 54de98f7520b83a4d20a859fbe8644033878c422 Mon Sep 17 00:00:00 2001
From: abmajumder <abmajumder at nvidia.com>
Date: Thu, 6 Feb 2025 16:02:58 +0530
Subject: [PATCH 2/2] refine

---
 llvm/docs/NVPTXUsage.rst                 |  3 +--
 llvm/include/llvm/IR/IntrinsicsNVVM.td   | 21 ++++++++++-----------
 llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 22 +++++++++++-----------
 3 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst
index 31602a8c4981157..90a64640b75225d 100644
--- a/llvm/docs/NVPTXUsage.rst
+++ b/llvm/docs/NVPTXUsage.rst
@@ -612,8 +612,7 @@ specified address in the global or local memory address space into the
 specified cache level (L1 or L2). The '``prefetchu.*``' instruction brings the cache line
 containing the specified generic address into the specified uniform cache level.
 If no address space is specified, the address is assumed to be generic. The intrinsics
-use an eviction priority, which is specified by the '``.level::eviction_priority``' modifier. 
-
+use an eviction priority, which is specified by the '``.level::eviction_priority``' modifier.
 
 * A prefetch to a shared memory location performs no operation.
 * A prefetch into the uniform cache requires a generic address, 
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index e9504b0ef5a2f20..36e79b5e50a34ac 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -5002,26 +5002,25 @@ foreach dim = [1, 2, 3, 4, 5] in {
 
 // Intrinsics for Prefetch and Prefetchu
 foreach addr = ["global", "local", ""] in {
-  foreach evict = !if(!eq(addr, "global"),
-                  ["evictlast", "evictnormal"],
-                  ["evictnormal"]) in {
-    foreach level = ["L1", "L2"] in {
+  foreach level = ["L1", "L2"] in {
+    foreach evict = !if(!eq(addr, "global"),
+                    ["evictlast", "evictnormal"],
+                    ["evictnormal"]) in {
+                    
       def int_nvvm_prefetch_ # !if(!eq(addr, ""), "", addr # "_") 
           # level # "_" # evict : Intrinsic<[], 
                                             !cond(
                                             !eq(addr, "global") : [llvm_global_ptr_ty],
-                                            !eq(addr, "local"): [llvm_local_ptr_ty],
-                                            !eq(addr, ""): [llvm_ptr_ty]),
-                                            [IntrArgMemOnly,ReadOnly<ArgIndex<0>>,
-                                             NoCapture<ArgIndex<0>>],
-                                            "llvm.nvvm.prefetch." # !if(!eq(addr, ""), "", addr                                                                                                     # ".")# level # "." # evict>;   
+                                            !eq(addr, "local") : [llvm_local_ptr_ty],
+                                            !eq(addr, "") : [llvm_ptr_ty]),
+                                            [IntrArgMemOnly, ReadOnly<ArgIndex<0>>,
+                                             NoCapture<ArgIndex<0>>]>;   
     }
   }
 }
 
 def int_nvvm_prefetchu_L1_evictnormal : Intrinsic<[], [llvm_ptr_ty],
-  [IntrArgMemOnly, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
-  "llvm.nvvm.prefetchu.L1.evictnormal">;
+  [IntrArgMemOnly, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;
 
 
 // Intrinsics for Bulk Copy using TMA (non-tensor)
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 69f9cb3ff0c87b8..d3792dc76c2019a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -743,27 +743,27 @@ foreach dim = [1, 2, 3, 4, 5] in {
 //Prefetch and Prefetchu 
 class PREFETCH_INTRS<string InstName, Intrinsic Intrin> :
           NVPTXInst<(outs), (ins Int64Regs:$addr),
-            !strconcat(InstName, " [$addr];"),
-            [(Intrin i64:$addr)]>,
+          !strconcat(InstName, " [$addr];"),
+          [(Intrin i64:$addr)]>,
           Requires<[hasPTX<80>, hasSM<90>]>;
    
 
 // Only global supports evictlast and evictnormal.
 // Other variants (local and default) only support evictnormal
 foreach addr = ["global", "local", ""] in {
-  foreach evict = !if(!eq(addr, "global"),
-                          ["evictlast", "evictnormal"],
-                          ["evictnormal"]) in {
-    foreach level = ["L1", "L2"] in {
-      def PREFETCH_# addr # level # "_" # evict : PREFETCH_INTRS<
-                                                 "prefetch." # !if(!eq(addr, ""), "", addr # ".") # level # "." # evict,
-                                                 !cast<Intrinsic>
-                                                 ("int_nvvm_prefetch_"# !if(!eq(addr, ""), "", addr # "_") # level # "_" # evict)>;
+  foreach level = ["L1", "L2"] in {
+    foreach evict = !if(!eq(addr, "global"),
+                       ["evictlast", "evictnormal"],
+                       ["evictnormal"]) in {
+    
+      defvar inst_name = "prefetch." # !if(!eq(addr, ""), "", addr # ".") # level # "." # evict;
+      defvar intr_name = !strconcat("int_nvvm_", !subst(".", "_", inst_name));
+      def PREFETCH_# addr # "_" # level # "_" # evict : PREFETCH_INTRS<inst_name, !cast<Intrinsic>(intr_name)>;
     }
   }
 }
 
-def PREFETCHU_L1_EVICTNORMAL : PREFETCH_INTRS<"prefetchu.L1.evictnormal", !cast<Intrinsic>("int_nvvm_prefetchu_L1_evictnormal")>;
+def PREFETCHU_L1_EVICTNORMAL : PREFETCH_INTRS<"prefetchu.L1.evictnormal",  int_nvvm_prefetchu_L1_evictnormal>;
 
 //-----------------------------------
 // MBarrier Functions


