[llvm] [NVPTX] Add TMA bulk tensor copy intrinsics (PR #96083)

Durgadoss R via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 19 08:06:22 PDT 2024


================
@@ -0,0 +1,169 @@
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_90 -mattr=+ptx80| FileCheck --check-prefixes=CHECK_PTX64 %s
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_90 -mattr=+ptx80 --nvptx-short-ptr| FileCheck --check-prefixes=CHECK_PTX_SHARED32 %s
+; RUN: %if ptxas-12.3 %{ llc < %s -march=nvptx64 -mcpu=sm_90 -mattr=+ptx80| %ptxas-verify -arch=sm_90 %}
+; RUN: %if ptxas-12.3 %{ llc < %s -march=nvptx64 -mcpu=sm_90 -mattr=+ptx80 --nvptx-short-ptr| %ptxas-verify -arch=sm_90 %}
+
+declare void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.1d(i32 %flags, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i16 %mc, i64 %ch);
+declare void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.2d(i32 %flags, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i16 %mc, i64 %ch);
+declare void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.3d(i32 %flags, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch);
+declare void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.4d(i32 %flags, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch);
+declare void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.5d(i32 %flags, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch);
+
+; CHECK-LABEL: cp_async_bulk_tensor_g2s_1d
+define void @cp_async_bulk_tensor_g2s_1d(i32 %flag, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch) {
+  ; CHECK_PTX64: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes [%rd{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}}], [%rd{{[0-9]+}}];
+  ; CHECK_PTX_SHARED32: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes [%r{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}}], [%r{{[0-9]+}}];
+  tail call void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.1d(i32 0, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef)
+
+  ; CHECK_PTX64: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.L2::cache_hint [%rd{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}}], [%rd{{[0-9]+}}], %rd{{[0-9]+}};
+  ; CHECK_PTX_SHARED32: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.L2::cache_hint [%r{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}}], [%r{{[0-9]+}}], %rd{{[0-9]+}};
+  tail call void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.1d(i32 1, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 %ch)
+  ; CHECK_PTX64: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%rd{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}}], [%rd{{[0-9]+}}], %rs{{[0-9]+}};
+  ; CHECK_PTX_SHARED32: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}}], [%r{{[0-9]+}}], %rs{{[0-9]+}};
+  tail call void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.1d(i32 2, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch)
+
+  ; CHECK_PTX64: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%rd{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}}], [%rd{{[0-9]+}}], %rs{{[0-9]+}}, %rd{{[0-9]+}};
+  ; CHECK_PTX_SHARED32: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}}], [%r{{[0-9]+}}], %rs{{[0-9]+}}, %rd{{[0-9]+}};
+  tail call void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.1d(i32 3, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch)
+  ret void
+}
+
+; CHECK-LABEL: cp_async_bulk_tensor_g2s_2d
+define void @cp_async_bulk_tensor_g2s_2d(i32 %flag, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch) {
+  ; CHECK_PTX64: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes [%rd{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}, %r{{[0-9]+}}}], [%rd{{[0-9]+}}];
+  ; CHECK_PTX_SHARED32: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes [%r{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}, %r{{[0-9]+}}}], [%r{{[0-9]+}}];
+  tail call void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.2d(i32 0, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef)
+
+  ; CHECK_PTX64: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.L2::cache_hint [%rd{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}, %r{{[0-9]+}}}], [%rd{{[0-9]+}}], %rd{{[0-9]+}};
+  ; CHECK_PTX_SHARED32: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.L2::cache_hint [%r{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}, %r{{[0-9]+}}}], [%r{{[0-9]+}}], %rd{{[0-9]+}};
+  tail call void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.2d(i32 1, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 %ch)
+
+  ; CHECK_PTX64: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%rd{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}, %r{{[0-9]+}}}], [%rd{{[0-9]+}}], %rs{{[0-9]+}};
+  ; CHECK_PTX_SHARED32: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}, %r{{[0-9]+}}}], [%r{{[0-9]+}}], %rs{{[0-9]+}};
+  tail call void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.2d(i32 2, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch)
+
+  ; CHECK_PTX64: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%rd{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}, %r{{[0-9]+}}}], [%rd{{[0-9]+}}], %rs{{[0-9]+}}, %rd{{[0-9]+}};
+  ; CHECK_PTX_SHARED32: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}, %r{{[0-9]+}}}], [%r{{[0-9]+}}], %rs{{[0-9]+}}, %rd{{[0-9]+}};
+  tail call void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.2d(i32 3, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch)
+  ret void
+}
+
+; CHECK-LABEL: cp_async_bulk_tensor_g2s_3d_tile
+define void @cp_async_bulk_tensor_g2s_3d_tile(i32 %flag, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch) {
+  ; CHECK_PTX64: cp.async.bulk.tensor.3d.shared::cluster.global.tile.mbarrier::complete_tx::bytes [%rd{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}], [%rd{{[0-9]+}}];
+  ; CHECK_PTX_SHARED32: cp.async.bulk.tensor.3d.shared::cluster.global.tile.mbarrier::complete_tx::bytes [%r{{[0-9]+}}], [%rd{{[0-9]+}}, {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}], [%r{{[0-9]+}}];
+  tail call void @llvm.nvvm.cp.async.bulk.tensor.gmem.to.smem.3d(i32 0, ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef)
----------------
durga4github wrote:

I realized that the PTX spec does not explicitly state the im2col-offset size as 16 bits anywhere.
It appears only in the example section, as shown below:
".reg .u16 i2cOffW, i2cOffH, i2cOffD;"

For the tensor dimensions, the spec states:
"The individual tensor coordinates in tensorCoords are of type .s32".

I will request an update to the spec to make these types explicit.
So, we do not have any type mismatches here (the types match what we expect).

https://github.com/llvm/llvm-project/pull/96083


More information about the llvm-commits mailing list