[clang] [llvm] [mlir] [NVPTX] Add support for Shared Cluster Memory address space [1/2] (PR #135444)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 22 14:22:29 PDT 2025
https://github.com/modiking updated https://github.com/llvm/llvm-project/pull/135444
>From 12532e4182038ed6c7bef3b8e03494ef2bd40580 Mon Sep 17 00:00:00 2001
From: Modi Mo <mmo at nvidia.com>
Date: Mon, 31 Mar 2025 22:54:56 -0700
Subject: [PATCH 1/9] initial implementation
---
clang/lib/Basic/Targets/NVPTX.cpp | 7 +-
clang/test/CodeGen/target-data.c | 2 +-
clang/test/CodeGenCUDA/builtins-sm90.cu | 2 +-
llvm/docs/NVPTXUsage.rst | 6 +-
llvm/include/llvm/IR/IntrinsicsNVVM.td | 45 +--
llvm/include/llvm/Support/NVPTXAddrSpace.h | 1 +
llvm/lib/IR/AutoUpgrade.cpp | 87 ++++++
.../NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp | 1 +
llvm/lib/Target/NVPTX/NVPTX.h | 1 +
llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp | 5 +
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 11 +-
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 1 +
llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 8 +-
llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp | 14 +-
llvm/lib/Target/NVPTX/NVPTXUtilities.h | 2 +
.../Assembler/auto_upgrade_nvvm_intrinsics.ll | 57 ++++
.../CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll | 96 +++----
llvm/test/CodeGen/NVPTX/cp-async-bulk.ll | 18 +-
.../NVPTX/distributed-shared-cluster.ll | 258 ++++++++++++++++++
llvm/test/CodeGen/NVPTX/nvptx-aa.ll | 12 +-
.../include/mlir/Dialect/LLVMIR/NVVMDialect.h | 5 +-
mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td | 5 +-
.../Target/LLVMIR/nvvm/tma_bulk_copy.mlir | 24 +-
23 files changed, 556 insertions(+), 112 deletions(-)
create mode 100644 llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
diff --git a/clang/lib/Basic/Targets/NVPTX.cpp b/clang/lib/Basic/Targets/NVPTX.cpp
index 5931a77a85fec..08c8460045c6a 100644
--- a/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/clang/lib/Basic/Targets/NVPTX.cpp
@@ -71,10 +71,11 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
if (TargetPointerWidth == 32)
resetDataLayout(
- "e-p:32:32-p6:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
+ "e-p:32:32-p6:32:32-p7:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
else if (Opts.NVPTXUseShortPointers)
- resetDataLayout("e-p3:32:32-p4:32:32-p5:32:32-p6:32:32-i64:64-i128:128-v16:"
- "16-v32:32-n16:32:64");
+ resetDataLayout(
+ "e-p3:32:32-p4:32:32-p5:32:32-p6:32:32-p7:32:32-i64:64-i128:128-v16:"
+ "16-v32:32-n16:32:64");
else
resetDataLayout("e-p6:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
diff --git a/clang/test/CodeGen/target-data.c b/clang/test/CodeGen/target-data.c
index fe29aadb1dd53..9cb00e8ee73d3 100644
--- a/clang/test/CodeGen/target-data.c
+++ b/clang/test/CodeGen/target-data.c
@@ -160,7 +160,7 @@
// RUN: %clang_cc1 -triple nvptx-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=NVPTX
-// NVPTX: target datalayout = "e-p:32:32-p6:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64"
+// NVPTX: target datalayout = "e-p:32:32-p6:32:32-p7:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64"
// RUN: %clang_cc1 -triple nvptx64-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=NVPTX64
diff --git a/clang/test/CodeGenCUDA/builtins-sm90.cu b/clang/test/CodeGenCUDA/builtins-sm90.cu
index a639c7716adb1..f4746df944536 100644
--- a/clang/test/CodeGenCUDA/builtins-sm90.cu
+++ b/clang/test/CodeGenCUDA/builtins-sm90.cu
@@ -50,7 +50,7 @@ __attribute__((global)) void kernel(long *out, void *ptr, unsigned u) {
auto * sptr = (__attribute__((address_space(3))) void *)ptr;
// CHECK: call ptr @llvm.nvvm.mapa(ptr %{{.*}}, i32 %{{.*}})
out[i++] = (long) __nvvm_mapa(ptr, u);
- // CHECK: call ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
+ // CHECK: call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
out[i++] = (long) __nvvm_mapa_shared_cluster(sptr, u);
// CHECK: call i32 @llvm.nvvm.getctarank(ptr {{.*}})
out[i++] = __nvvm_getctarank(ptr);
diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst
index ef6e888286def..7bd68c3fe4f7a 100644
--- a/llvm/docs/NVPTXUsage.rst
+++ b/llvm/docs/NVPTXUsage.rst
@@ -552,7 +552,7 @@ Syntax:
.. code-block:: llvm
- declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
+ declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
Overview:
"""""""""
@@ -616,7 +616,7 @@ Syntax:
.. code-block:: llvm
- declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %mbar, ptr addrspace(3) %src, i32 %size)
+ declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(3) %src, i32 %size)
Overview:
"""""""""
@@ -771,7 +771,7 @@ Syntax:
.. code-block:: llvm
- declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
+ declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(..., i32 %d0, i32 %d1, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(..., i32 %d0, i32 %d1, i32 %d2, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
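
To illustrate the updated signature, a minimal IR sketch (only the declaration is taken from this patch; the function and value names are made up):

declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)

define void @copy_to_cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size) {
  ; plain bulk copy: no multicast mask, no cache hint (both flags are 0)
  tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 0, i1 0)
  ret void
}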
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index d09e1da457249..e415fcd8707de 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -131,6 +131,7 @@ def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
def llvm_local_ptr_ty : LLVMQualPointerType<5>; // (local)ptr
def llvm_tmem_ptr_ty : LLVMQualPointerType<6>; // (tensor memory)ptr
+def llvm_dshared_ptr_ty : LLVMQualPointerType<7>; // (dshared)ptr
//
// MISC
@@ -691,15 +692,15 @@ class CP_ASYNC_BULK_TENSOR_G2S_INTR<int dim, string mode> {
list<LLVMType> Im2ColOffsetsTy = !listsplat(llvm_i16_ty, NumIm2ColOffsets);
list<LLVMType> TensorDimsTy = !listsplat(llvm_i32_ty, dim);
list<LLVMType> ArgsTy = !listconcat(
- [llvm_shared_ptr_ty, // dst_smem_ptr
- llvm_shared_ptr_ty, // mbarrier_smem_ptr
- llvm_ptr_ty], // tensormap_ptr
- TensorDimsTy, // actual tensor dims
- Im2ColOffsetsTy, // im2col offsets
- [llvm_i16_ty, // cta_mask
- llvm_i64_ty, // cache_hint
- llvm_i1_ty, // Flag for cta_mask
- llvm_i1_ty] // Flag for cache_hint
+ [llvm_dshared_ptr_ty, // dst_smem_ptr
+ llvm_shared_ptr_ty, // mbarrier_smem_ptr
+ llvm_ptr_ty], // tensormap_ptr
+ TensorDimsTy, // actual tensor dims
+ Im2ColOffsetsTy, // im2col offsets
+ [llvm_i16_ty, // cta_mask
+ llvm_i64_ty, // cache_hint
+ llvm_i1_ty, // Flag for cta_mask
+ llvm_i1_ty] // Flag for cache_hint
);
int TempFlagsStartIdx = !add(dim, 5);
@@ -5134,7 +5135,7 @@ def int_nvvm_mapa
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa">;
def int_nvvm_mapa_shared_cluster
- : DefaultAttrsIntrinsic<[llvm_shared_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
+ : DefaultAttrsIntrinsic<[llvm_dshared_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa.shared.cluster">;
def int_nvvm_getctarank
@@ -5234,14 +5235,14 @@ def int_nvvm_discard_L2 : DefaultAttrsIntrinsic<[],
// From Global to Shared Cluster
def int_nvvm_cp_async_bulk_global_to_shared_cluster
: DefaultAttrsIntrinsic<[],
- [llvm_shared_ptr_ty, // dst_smem_ptr
- llvm_shared_ptr_ty, // mbarrier_ptr
- llvm_global_ptr_ty, // src_gmem_ptr
- llvm_i32_ty, // copy_size
- llvm_i16_ty, // cta_mask
- llvm_i64_ty, // cache_hint
- llvm_i1_ty, // Flag for cta_mask
- llvm_i1_ty], // Flag for cache_hint
+ [llvm_dshared_ptr_ty, // dst_dsmem_ptr
+ llvm_shared_ptr_ty, // mbarrier_ptr
+ llvm_global_ptr_ty, // src_gmem_ptr
+ llvm_i32_ty, // copy_size
+ llvm_i16_ty, // cta_mask
+ llvm_i64_ty, // cache_hint
+ llvm_i1_ty, // Flag for cta_mask
+ llvm_i1_ty], // Flag for cache_hint
[IntrConvergent, IntrArgMemOnly,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<2>>,
NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
@@ -5251,10 +5252,10 @@ def int_nvvm_cp_async_bulk_global_to_shared_cluster
// From Shared CTA to Shared Cluster
def int_nvvm_cp_async_bulk_shared_cta_to_cluster
: DefaultAttrsIntrinsic<[],
- [llvm_shared_ptr_ty, // dst_smem_ptr
- llvm_shared_ptr_ty, // mbarrier_ptr
- llvm_shared_ptr_ty, // src_smem_ptr
- llvm_i32_ty], // copy_size
+ [llvm_dshared_ptr_ty, // dst_dsmem_ptr
+ llvm_shared_ptr_ty, // mbarrier_ptr
+ llvm_shared_ptr_ty, // src_smem_ptr
+ llvm_i32_ty], // copy_size
[IntrConvergent, IntrArgMemOnly,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<2>>,
NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
diff --git a/llvm/include/llvm/Support/NVPTXAddrSpace.h b/llvm/include/llvm/Support/NVPTXAddrSpace.h
index 486a396621da1..a3eac31f2e5e9 100644
--- a/llvm/include/llvm/Support/NVPTXAddrSpace.h
+++ b/llvm/include/llvm/Support/NVPTXAddrSpace.h
@@ -25,6 +25,7 @@ enum AddressSpace : unsigned {
ADDRESS_SPACE_CONST = 4,
ADDRESS_SPACE_LOCAL = 5,
ADDRESS_SPACE_TENSOR = 6,
+ ADDRESS_SPACE_DSHARED = 7,
ADDRESS_SPACE_PARAM = 101,
};
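
A minimal IR sketch of the new address space in use (based on the intrinsic signature introduced in this patch; names are illustrative):

declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)

define ptr addrspace(7) @map_to_remote_cta(ptr addrspace(3) %smem, i32 %cta_rank) {
  ; maps a CTA-local shared pointer to the corresponding shared::cluster
  ; (addrspace 7) pointer in the CTA identified by %cta_rank
  %remote = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %smem, i32 %cta_rank)
  ret ptr addrspace(7) %remote
}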
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 2e17b8ce5eb7b..f66935ad4515d 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -46,6 +46,7 @@
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/NVPTXAddrSpace.h"
#include "llvm/Support/Regex.h"
#include "llvm/TargetParser/Triple.h"
#include <cstdint>
@@ -938,6 +939,47 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
return false; // No other 'arm.*', 'aarch64.*'.
}
+static Intrinsic::ID shouldUpgradeNVPTXDSharedIntrinsic(Function *F,
+ StringRef Name) {
+ if (Name.consume_front("mapa.shared.cluster"))
+ if (F->getReturnType()->getPointerAddressSpace() ==
+ NVPTXAS::ADDRESS_SPACE_SHARED)
+ return Intrinsic::nvvm_mapa_shared_cluster;
+
+ if (Name.consume_front("cp.async.bulk.")) {
+ Intrinsic::ID ID =
+ StringSwitch<Intrinsic::ID>(Name)
+ .Case("global.to.shared.cluster",
+ Intrinsic::nvvm_cp_async_bulk_global_to_shared_cluster)
+ .Case("shared.cta.to.cluster",
+ Intrinsic::nvvm_cp_async_bulk_shared_cta_to_cluster)
+ .Case("tensor.g2s.im2col.3d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_3d)
+ .Case("tensor.g2s.im2col.4d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_4d)
+ .Case("tensor.g2s.im2col.5d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_5d)
+ .Case("tensor.g2s.tile.1d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_1d)
+ .Case("tensor.g2s.tile.2d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_2d)
+ .Case("tensor.g2s.tile.3d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_3d)
+ .Case("tensor.g2s.tile.4d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_4d)
+ .Case("tensor.g2s.tile.5d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_5d)
+ .Default(Intrinsic::not_intrinsic);
+
+ if (ID != Intrinsic::not_intrinsic)
+ if (F->getArg(0)->getType()->getPointerAddressSpace() ==
+ NVPTXAS::ADDRESS_SPACE_SHARED)
+ return ID;
+ }
+
+ return Intrinsic::not_intrinsic;
+}
+
static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
if (Name.consume_front("fma.rn."))
return StringSwitch<Intrinsic::ID>(Name)
@@ -1278,6 +1320,14 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
}
}
+ // Upgrade Distributed Shared Memory Intrinsics
+ Intrinsic::ID IID = shouldUpgradeNVPTXDSharedIntrinsic(F, Name);
+ if (IID != Intrinsic::not_intrinsic) {
+ rename(F);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID);
+ return true;
+ }
+
// The following nvvm intrinsics correspond exactly to an LLVM idiom, but
// not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
//
@@ -4718,6 +4768,43 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
CI->eraseFromParent();
return;
}
+ case Intrinsic::nvvm_mapa_shared_cluster: {
+ // Create a new call with the correct address space.
+ NewCall =
+ Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1)});
+ Value *Res = NewCall;
+ Res = Builder.CreateAddrSpaceCast(
+ Res, Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_GENERIC));
+ Res = Builder.CreateAddrSpaceCast(
+ Res, Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_SHARED));
+ NewCall->takeName(CI);
+ CI->replaceAllUsesWith(Res);
+ CI->eraseFromParent();
+ return;
+ }
+ case Intrinsic::nvvm_cp_async_bulk_global_to_shared_cluster:
+ case Intrinsic::nvvm_cp_async_bulk_shared_cta_to_cluster:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_3d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_4d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_5d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_1d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_2d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_3d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_4d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_5d: {
+
+ SmallVector<Value *, 4> Args(CI->args());
+ Args[0] = Builder.CreateAddrSpaceCast(
+ Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_GENERIC));
+ Args[0] = Builder.CreateAddrSpaceCast(
+ Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_DSHARED));
+
+ NewCall = Builder.CreateCall(NewFn, Args);
+ NewCall->takeName(CI);
+ CI->replaceAllUsesWith(NewCall);
+ CI->eraseFromParent();
+ return;
+ }
case Intrinsic::riscv_sha256sig0:
case Intrinsic::riscv_sha256sig1:
case Intrinsic::riscv_sha256sum0:
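
Sketching the net effect of this upgrade path in IR (legacy form on top, auto-upgraded form below; value names are illustrative):

; before: legacy signature with an addrspace(3) destination
call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)

; after: the destination is cast addrspace(3) -> generic -> addrspace(7) and the
; declaration with the new signature is called
%dst.gen = addrspacecast ptr addrspace(3) %dst to ptr
%dst.cluster = addrspacecast ptr %dst.gen to ptr addrspace(7)
call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst.cluster, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)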
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
index 4e2e4c99df803..8dbc739e3bef9 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
@@ -285,6 +285,7 @@ void NVPTXInstPrinter::printLdStCode(const MCInst *MI, int OpNum,
case NVPTX::AddressSpace::Global:
case NVPTX::AddressSpace::Const:
case NVPTX::AddressSpace::Shared:
+ case NVPTX::AddressSpace::Dshared:
case NVPTX::AddressSpace::Param:
case NVPTX::AddressSpace::Local:
O << "." << A;
diff --git a/llvm/lib/Target/NVPTX/NVPTX.h b/llvm/lib/Target/NVPTX/NVPTX.h
index 98e77ca80b8d5..c20c522f36bd3 100644
--- a/llvm/lib/Target/NVPTX/NVPTX.h
+++ b/llvm/lib/Target/NVPTX/NVPTX.h
@@ -176,6 +176,7 @@ enum AddressSpace : AddressSpaceUnderlyingType {
Shared = 3,
Const = 4,
Local = 5,
+ Dshared = 7,
// NVPTX Backend Private:
Param = 101
diff --git a/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp b/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
index b910ccab21bf3..60bc22f5f589c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
@@ -86,6 +86,11 @@ static AliasResult::Kind getAliasResult(unsigned AS1, unsigned AS2) {
// TODO: cvta.param is not yet supported. We need to change aliasing
// rules once it is added.
+ // Distributed shared memory aliases with shared memory.
+ if (((AS1 == ADDRESS_SPACE_SHARED) && (AS2 == ADDRESS_SPACE_DSHARED)) ||
+ ((AS1 == ADDRESS_SPACE_DSHARED) && (AS2 == ADDRESS_SPACE_SHARED)))
+ return AliasResult::MayAlias;
+
return (AS1 == AS2 ? AliasResult::MayAlias : AliasResult::NoAlias);
}
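
In IR terms, a small sketch of what NVPTX AA reports with this change for mixed shared/cluster accesses (illustrative only):

define void @aa_example(ptr addrspace(3) %cta, ptr addrspace(7) %cluster, ptr addrspace(1) %global) {
  store i32 0, ptr addrspace(3) %cta      ; vs. %cluster: MayAlias (spaces 3 and 7 may overlap)
  store i32 1, ptr addrspace(7) %cluster  ; vs. %global:  NoAlias (distinct, unrelated spaces)
  store i32 2, ptr addrspace(1) %global
  ret void
}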
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 486c7c815435a..bc07999e142c6 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -513,6 +513,8 @@ static std::optional<unsigned> convertAS(unsigned AS) {
return NVPTX::AddressSpace::Global;
case llvm::ADDRESS_SPACE_SHARED:
return NVPTX::AddressSpace::Shared;
+ case llvm::ADDRESS_SPACE_DSHARED:
+ return NVPTX::AddressSpace::Dshared;
case llvm::ADDRESS_SPACE_GENERIC:
return NVPTX::AddressSpace::Generic;
case llvm::ADDRESS_SPACE_PARAM:
@@ -658,7 +660,8 @@ getOperationOrderings(MemSDNode *N, const NVPTXSubtarget *Subtarget) {
bool AddrGenericOrGlobalOrShared =
(CodeAddrSpace == NVPTX::AddressSpace::Generic ||
CodeAddrSpace == NVPTX::AddressSpace::Global ||
- CodeAddrSpace == NVPTX::AddressSpace::Shared);
+ CodeAddrSpace == NVPTX::AddressSpace::Shared ||
+ CodeAddrSpace == NVPTX::AddressSpace::Dshared);
if (!AddrGenericOrGlobalOrShared)
return NVPTX::Ordering::NotAtomic;
@@ -979,6 +982,9 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
case ADDRESS_SPACE_SHARED:
Opc = TM.is64Bit() ? NVPTX::cvta_shared_64 : NVPTX::cvta_shared;
break;
+ case ADDRESS_SPACE_DSHARED:
+ Opc = TM.is64Bit() ? NVPTX::cvta_dshared_64 : NVPTX::cvta_dshared;
+ break;
case ADDRESS_SPACE_CONST:
Opc = TM.is64Bit() ? NVPTX::cvta_const_64 : NVPTX::cvta_const;
break;
@@ -1004,6 +1010,9 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
case ADDRESS_SPACE_SHARED:
Opc = TM.is64Bit() ? NVPTX::cvta_to_shared_64 : NVPTX::cvta_to_shared;
break;
+ case ADDRESS_SPACE_DSHARED:
+ Opc = TM.is64Bit() ? NVPTX::cvta_to_dshared_64 : NVPTX::cvta_to_dshared;
+ break;
case ADDRESS_SPACE_CONST:
Opc = TM.is64Bit() ? NVPTX::cvta_to_const_64 : NVPTX::cvta_to_const;
break;
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index ee6380a8a89c4..043da14bcb236 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -137,6 +137,7 @@ def hasAtomBitwise64 : Predicate<"Subtarget->hasAtomBitwise64()">;
def hasAtomMinMax64 : Predicate<"Subtarget->hasAtomMinMax64()">;
def hasVote : Predicate<"Subtarget->hasVote()">;
def hasDouble : Predicate<"Subtarget->hasDouble()">;
+def hasClusters : Predicate<"Subtarget->hasClusters()">;
def hasLDG : Predicate<"Subtarget->hasLDG()">;
def hasLDU : Predicate<"Subtarget->hasLDU()">;
def hasPTXASUnreachableBug : Predicate<"Subtarget->hasPTXASUnreachableBug()">;
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 4ba3e6f06bb5f..e66ad45304676 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -33,6 +33,9 @@ def AS_match {
code shared = [{
return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_SHARED);
}];
+ code dshared = [{
+ return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_DSHARED);
+ }];
code global = [{
return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_GLOBAL);
}];
@@ -2039,10 +2042,11 @@ class ATOMIC_GLOBAL_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.global>;
class ATOMIC_SHARED_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.shared>;
+class ATOMIC_DSHARED_CHK <dag frag>
+ : PatFrag<!setdagop(frag, ops), frag, AS_match.dshared>;
class ATOMIC_GENERIC_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.generic>;
-
multiclass F_ATOMIC_2<RegTyInfo t, string sem_str, string as_str, string op_str,
SDPatternOperator op, list<Predicate> preds> {
defvar asm_str = "atom" # sem_str # as_str # "." # op_str # " \t$dst, [$addr], $b;";
@@ -2094,6 +2098,7 @@ multiclass F_ATOMIC_2_AS<RegTyInfo t, SDPatternOperator frag, string op_str, lis
defvar frag_pat = (frag node:$a, node:$b);
defm _G : F_ATOMIC_2<t, "", ".global", op_str, ATOMIC_GLOBAL_CHK<frag_pat>, preds>;
defm _S : F_ATOMIC_2<t, "", ".shared", op_str, ATOMIC_SHARED_CHK<frag_pat>, preds>;
+ defm _DS : F_ATOMIC_2<t, "", ".shared::cluster", op_str, ATOMIC_DSHARED_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
defm _GEN : F_ATOMIC_2<t, "", "", op_str, ATOMIC_GENERIC_CHK<frag_pat>, preds>;
}
@@ -2101,6 +2106,7 @@ multiclass F_ATOMIC_3_AS<RegTyInfo t, SDPatternOperator frag, string sem_str, st
defvar frag_pat = (frag node:$a, node:$b, node:$c);
defm _G : F_ATOMIC_3<t, sem_str, ".global", op_str, ATOMIC_GLOBAL_CHK<frag_pat>, preds>;
defm _S : F_ATOMIC_3<t, sem_str, ".shared", op_str, ATOMIC_SHARED_CHK<frag_pat>, preds>;
+ defm _DS : F_ATOMIC_3<t, sem_str, ".shared::cluster", op_str, ATOMIC_DSHARED_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
defm _GEN : F_ATOMIC_3<t, sem_str, "", op_str, ATOMIC_GENERIC_CHK<frag_pat>, preds>;
}
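
A minimal sketch of what the new _DS variants select for (assuming an sm_90 target as in the test below; names are illustrative):

define i32 @cluster_atomic_add(ptr addrspace(7) %p) {
  ; expected to lower to: atom.shared::cluster.add.u32
  %old = atomicrmw add ptr addrspace(7) %p, i32 1 monotonic
  ret i32 %old
}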
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index a4c3b43aec9f2..88896c8f407ad 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -117,13 +117,15 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeNVPTXTarget() {
static std::string computeDataLayout(bool is64Bit, bool UseShortPointers) {
std::string Ret = "e";
- if (!is64Bit)
- Ret += "-p:32:32";
- else if (UseShortPointers)
- Ret += "-p3:32:32-p4:32:32-p5:32:32";
-
// Tensor Memory (addrspace:6) is always 32-bits.
- Ret += "-p6:32:32";
+ // Distributed Shared Memory (addrspace:7) follows shared memory
+ // (addrspace:3).
+ if (!is64Bit)
+ Ret += "-p:32:32-p6:32:32-p7:32:32";
+ else if (UseShortPointers) {
+ Ret += "-p3:32:32-p4:32:32-p5:32:32-p6:32:32-p7:32:32";
+ } else
+ Ret += "-p6:32:32";
Ret += "-i64:64-i128:128-v16:16-v32:32-n16:32:64";
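
For reference, the resulting datalayout strings (derived from the code above; the 64-bit default without short pointers is unchanged, so addrspace(7) pointers remain 64-bit there):

  32-bit:                 "e-p:32:32-p6:32:32-p7:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64"
  64-bit, short pointers: "e-p3:32:32-p4:32:32-p5:32:32-p6:32:32-p7:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64"
  64-bit default:         "e-p6:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64"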
diff --git a/llvm/lib/Target/NVPTX/NVPTXUtilities.h b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
index 70bf02035fd48..90cf35b1d8d77 100644
--- a/llvm/lib/Target/NVPTX/NVPTXUtilities.h
+++ b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
@@ -168,6 +168,8 @@ inline std::string AddressSpaceToString(AddressSpace A) {
return "const";
case AddressSpace::Shared:
return "shared";
+ case AddressSpace::Dshared:
+ return "shared::cluster";
case AddressSpace::Param:
return "param";
case AddressSpace::Local:
diff --git a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
index a3ddc62dd34e9..7792a4cc92f38 100644
--- a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
+++ b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
@@ -59,6 +59,19 @@ declare float @llvm.nvvm.ldg.global.f.f32.p0(ptr, i32)
declare i32 @llvm.nvvm.atomic.load.inc.32(ptr, i32)
declare i32 @llvm.nvvm.atomic.load.dec.32(ptr, i32)
+declare ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i64)
+
+declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(3), i32)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+
; CHECK-LABEL: @simple_upgrade
define void @simple_upgrade(i32 %a, i64 %b, i16 %c) {
; CHECK: call i32 @llvm.bitreverse.i32(i32 %a)
@@ -254,3 +267,47 @@ define i32 @atomics(ptr %p0, i32 %a) {
ret i32 %r2
}
+; CHECK-LABEL: @nvvm_shared_cluster_intrinsics
+define void @nvvm_shared_cluster_intrinsics(ptr addrspace(3) %p0, i64 %offset) {
+; CHECK: %[[ASC1:.*]] = addrspacecast ptr addrspace(3) %p0 to ptr addrspace(7)
+; CHECK: %[[CALL:.*]] = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(7) %[[ASC1]], i64 %offset)
+; CHECK: %[[ASC2:.*]] = addrspacecast ptr addrspace(7) %[[CALL]] to ptr addrspace(3)
+ %r = call ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p0, i64 %offset)
+ ret void
+}
+
+; CHECK-LABEL: @nvvm_cp_async_bulk_intrinsics
+define void @nvvm_cp_async_bulk_intrinsics(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, ptr addrspace(3) %src_shared, i32 %size) {
+; CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
+ call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
+ ret void
+}
+
+; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_im2col
+define void @nvvm_cp_async_bulk_tensor_g2s_im2col(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src) {
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i16 0, i64 0, i1 false, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_tile
+define void @nvvm_cp_async_bulk_tensor_g2s_tile(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src) {
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i16 0, i64 0, i1 false, i1 false)
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll b/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll
index 521dd567f837e..530a896642b89 100644
--- a/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll
+++ b/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll
@@ -6,18 +6,18 @@
target triple = "nvptx64-nvidia-cuda"
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_1d
-define void @cp_async_bulk_tensor_g2s_tile_1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_1d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -55,18 +55,18 @@ define void @cp_async_bulk_tensor_g2s_tile_1d(ptr addrspace(3) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 undef, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 undef, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_2d
-define void @cp_async_bulk_tensor_g2s_tile_2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_2d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -106,18 +106,18 @@ define void @cp_async_bulk_tensor_g2s_tile_2d(ptr addrspace(3) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 undef, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 undef, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_3d
-define void @cp_async_bulk_tensor_g2s_tile_3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_3d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -159,18 +159,18 @@ define void @cp_async_bulk_tensor_g2s_tile_3d(ptr addrspace(3) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_4d
-define void @cp_async_bulk_tensor_g2s_tile_4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_4d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -214,18 +214,18 @@ define void @cp_async_bulk_tensor_g2s_tile_4d(ptr addrspace(3) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_5d
-define void @cp_async_bulk_tensor_g2s_tile_5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_5d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -271,18 +271,18 @@ define void @cp_async_bulk_tensor_g2s_tile_5d(ptr addrspace(3) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_im2col_3d
-define void @cp_async_bulk_tensor_g2s_im2col_3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_im2col_3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_im2col_3d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<3>;
@@ -326,18 +326,18 @@ define void @cp_async_bulk_tensor_g2s_im2col_3d(ptr addrspace(3) %d, ptr addrspa
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], {%rs1}, %rs2;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], {%rs1}, %rs2, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_im2col_4d
-define void @cp_async_bulk_tensor_g2s_im2col_4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_im2col_4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_im2col_4d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<4>;
@@ -385,18 +385,18 @@ define void @cp_async_bulk_tensor_g2s_im2col_4d(ptr addrspace(3) %d, ptr addrspa
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], {%rs1, %rs2}, %rs3;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], {%rs1, %rs2}, %rs3, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_im2col_5d
-define void @cp_async_bulk_tensor_g2s_im2col_5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_im2col_5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_im2col_5d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<5>;
@@ -448,12 +448,12 @@ define void @cp_async_bulk_tensor_g2s_im2col_5d(ptr addrspace(3) %d, ptr addrspa
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], {%rs1, %rs2, %rs3}, %rs4;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], {%rs1, %rs2, %rs3}, %rs4, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll b/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll
index aa8f553351b46..bf1b86e37ae72 100644
--- a/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll
+++ b/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll
@@ -6,12 +6,12 @@
target triple = "nvptx64-nvidia-cuda"
-declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.global(ptr addrspace(1), ptr addrspace(3), i32, i64, i1)
-declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(3), i32)
+declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(3), i32)
declare void @llvm.nvvm.cp.async.bulk.prefetch.L2(ptr addrspace(1), i32, i64, i1)
-define void @cp_async_bulk_g2s(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr addrspace(3) %dst, i32 %size, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_g2s(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr addrspace(7) %dst, i32 %size, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_g2s(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -49,10 +49,10 @@ define void @cp_async_bulk_g2s(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [%r2], [%rd1], %r3, [%r1], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r2], [%rd1], %r3, [%r1], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 0, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 0, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
@@ -89,7 +89,7 @@ define void @cp_async_bulk_s2g(ptr addrspace(3) %src, ptr addrspace(1) %dst, i32
ret void
}
-define void @cp_async_bulk_cta_to_cluster(ptr addrspace(3) %src, ptr addrspace(3) %bar, ptr addrspace(3) %dst, i32 %size) {
+define void @cp_async_bulk_cta_to_cluster(ptr addrspace(3) %src, ptr addrspace(3) %bar, ptr addrspace(7) %dst, i32 %size) {
; CHECK-PTX64-LABEL: cp_async_bulk_cta_to_cluster(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b32 %r<2>;
@@ -114,7 +114,7 @@ define void @cp_async_bulk_cta_to_cluster(ptr addrspace(3) %src, ptr addrspace(3
; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r4, [cp_async_bulk_cta_to_cluster_param_3];
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.shared::cta.mbarrier::complete_tx::bytes [%r3], [%r1], %r4, [%r2];
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)
+ tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
new file mode 100644
index 0000000000000..f499500a64ebf
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
@@ -0,0 +1,258 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -o - -mcpu=sm_90 -march=nvptx64 -mattr=+ptx80 | FileCheck %s
+; RUN: %if ptxas-12.0 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 | %ptxas-verify -arch=sm_90 %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+@llvm.used = appending global [1 x ptr] [ptr @test_distributed_shared_cluster], section "llvm.metadata"
+
+declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)
+declare i1 @llvm.nvvm.isspacep.shared.cluster(ptr)
+declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
+declare ptr @llvm.nvvm.mapa(ptr, i32)
+
+define i32 @test_distributed_shared_cluster(ptr %ptr, ptr addrspace(3) %smem_ptr) local_unnamed_addr {
+; CHECK-LABEL: test_distributed_shared_cluster(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<13>;
+; CHECK-NEXT: .reg .b16 %rs<5>;
+; CHECK-NEXT: .reg .b32 %r<69>;
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b64 %rd<24>;
+; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.u64 %rd2, [test_distributed_shared_cluster_param_0];
+; CHECK-NEXT: ld.param.u64 %rd3, [test_distributed_shared_cluster_param_1];
+; CHECK-NEXT: mov.u32 %r24, %ctaid.x;
+; CHECK-NEXT: xor.b32 %r25, %r24, 1;
+; CHECK-NEXT: isspacep.shared::cluster %p1, %rd2;
+; CHECK-NEXT: mapa.u64 %rd4, %rd2, %r25;
+; CHECK-NEXT: isspacep.shared::cluster %p2, %rd4;
+; CHECK-NEXT: mapa.shared::cluster.u64 %rd5, %rd3, %r25;
+; CHECK-NEXT: mov.b16 %rs1, 0x3C00;
+; CHECK-NEXT: atom.shared::cluster.add.noftz.f16 %rs2, [%rd5], %rs1;
+; CHECK-NEXT: mov.b16 %rs3, 0x3F80;
+; CHECK-NEXT: atom.shared::cluster.add.noftz.bf16 %rs4, [%rd5], %rs3;
+; CHECK-NEXT: atom.shared::cluster.add.f32 %f1, [%rd5], 0f3F800000;
+; CHECK-NEXT: atom.shared::cluster.add.f64 %fd1, [%rd5], 0d3FF0000000000000;
+; CHECK-NEXT: atom.shared::cluster.add.u32 %r26, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.add.u64 %rd6, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.exch.b32 %r27, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.exch.b64 %rd7, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.min.s32 %r28, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.min.s64 %rd8, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.min.u32 %r29, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.min.u64 %rd9, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.max.s32 %r30, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.max.s64 %rd10, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.max.u32 %r31, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.max.u64 %rd11, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.inc.u32 %r32, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.dec.u32 %r33, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.and.b32 %r34, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.and.b64 %rd12, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.or.b32 %r35, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.or.b64 %rd13, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.xor.b32 %r36, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.xor.b64 %rd14, [%rd5], 1;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r37, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r38, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r39, [%rd5], 1, 0;
+; CHECK-NEXT: atom.release.shared::cluster.cas.b32 %r40, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b32 %r41, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b32 %r42, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r43, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r44, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r45, [%rd5], 1, 0;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b64 %rd15, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd16, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd17, [%rd5], 1, 0;
+; CHECK-NEXT: atom.release.shared::cluster.cas.b64 %rd18, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b64 %rd19, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b64 %rd20, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd21, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd22, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd23, [%rd5], 1, 0;
+; CHECK-NEXT: and.b64 %rd1, %rd5, -4;
+; CHECK-NEXT: cvt.u32.u64 %r46, %rd5;
+; CHECK-NEXT: and.b32 %r47, %r46, 3;
+; CHECK-NEXT: shl.b32 %r1, %r47, 3;
+; CHECK-NEXT: mov.b32 %r48, 65535;
+; CHECK-NEXT: shl.b32 %r49, %r48, %r1;
+; CHECK-NEXT: not.b32 %r2, %r49;
+; CHECK-NEXT: mov.b32 %r50, 1;
+; CHECK-NEXT: shl.b32 %r3, %r50, %r1;
+; CHECK-NEXT: ld.shared::cluster.u32 %r51, [%rd1];
+; CHECK-NEXT: and.b32 %r64, %r51, %r2;
+; CHECK-NEXT: $L__BB0_1: // %partword.cmpxchg.loop33
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or.b32 %r52, %r64, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r6, [%rd1], %r52, %r64;
+; CHECK-NEXT: setp.eq.s32 %p3, %r6, %r52;
+; CHECK-NEXT: @%p3 bra $L__BB0_3;
+; CHECK-NEXT: // %bb.2: // %partword.cmpxchg.failure32
+; CHECK-NEXT: // in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT: and.b32 %r7, %r6, %r2;
+; CHECK-NEXT: setp.ne.s32 %p4, %r64, %r7;
+; CHECK-NEXT: mov.b32 %r64, %r7;
+; CHECK-NEXT: @%p4 bra $L__BB0_1;
+; CHECK-NEXT: $L__BB0_3: // %partword.cmpxchg.end31
+; CHECK-NEXT: ld.shared::cluster.u32 %r53, [%rd1];
+; CHECK-NEXT: and.b32 %r65, %r53, %r2;
+; CHECK-NEXT: $L__BB0_4: // %partword.cmpxchg.loop23
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or.b32 %r54, %r65, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r10, [%rd1], %r54, %r65;
+; CHECK-NEXT: setp.eq.s32 %p5, %r10, %r54;
+; CHECK-NEXT: @%p5 bra $L__BB0_6;
+; CHECK-NEXT: // %bb.5: // %partword.cmpxchg.failure22
+; CHECK-NEXT: // in Loop: Header=BB0_4 Depth=1
+; CHECK-NEXT: and.b32 %r11, %r10, %r2;
+; CHECK-NEXT: setp.ne.s32 %p6, %r65, %r11;
+; CHECK-NEXT: mov.b32 %r65, %r11;
+; CHECK-NEXT: @%p6 bra $L__BB0_4;
+; CHECK-NEXT: $L__BB0_6: // %partword.cmpxchg.end21
+; CHECK-NEXT: fence.acq_rel.sys;
+; CHECK-NEXT: fence.acq_rel.sys;
+; CHECK-NEXT: ld.shared::cluster.u32 %r55, [%rd1];
+; CHECK-NEXT: and.b32 %r66, %r55, %r2;
+; CHECK-NEXT: $L__BB0_7: // %partword.cmpxchg.loop13
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or.b32 %r56, %r66, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r14, [%rd1], %r56, %r66;
+; CHECK-NEXT: setp.eq.s32 %p7, %r14, %r56;
+; CHECK-NEXT: @%p7 bra $L__BB0_9;
+; CHECK-NEXT: // %bb.8: // %partword.cmpxchg.failure12
+; CHECK-NEXT: // in Loop: Header=BB0_7 Depth=1
+; CHECK-NEXT: and.b32 %r15, %r14, %r2;
+; CHECK-NEXT: setp.ne.s32 %p8, %r66, %r15;
+; CHECK-NEXT: mov.b32 %r66, %r15;
+; CHECK-NEXT: @%p8 bra $L__BB0_7;
+; CHECK-NEXT: $L__BB0_9: // %partword.cmpxchg.end11
+; CHECK-NEXT: fence.acq_rel.sys;
+; CHECK-NEXT: ld.shared::cluster.u32 %r57, [%rd1];
+; CHECK-NEXT: and.b32 %r67, %r57, %r2;
+; CHECK-NEXT: $L__BB0_10: // %partword.cmpxchg.loop3
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or.b32 %r58, %r67, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r18, [%rd1], %r58, %r67;
+; CHECK-NEXT: setp.eq.s32 %p9, %r18, %r58;
+; CHECK-NEXT: @%p9 bra $L__BB0_12;
+; CHECK-NEXT: // %bb.11: // %partword.cmpxchg.failure2
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=1
+; CHECK-NEXT: and.b32 %r19, %r18, %r2;
+; CHECK-NEXT: setp.ne.s32 %p10, %r67, %r19;
+; CHECK-NEXT: mov.b32 %r67, %r19;
+; CHECK-NEXT: @%p10 bra $L__BB0_10;
+; CHECK-NEXT: $L__BB0_12: // %partword.cmpxchg.end1
+; CHECK-NEXT: fence.acq_rel.sys;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: ld.shared::cluster.u32 %r59, [%rd1];
+; CHECK-NEXT: and.b32 %r68, %r59, %r2;
+; CHECK-NEXT: $L__BB0_13: // %partword.cmpxchg.loop
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or.b32 %r60, %r68, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r22, [%rd1], %r60, %r68;
+; CHECK-NEXT: setp.eq.s32 %p11, %r22, %r60;
+; CHECK-NEXT: @%p11 bra $L__BB0_15;
+; CHECK-NEXT: // %bb.14: // %partword.cmpxchg.failure
+; CHECK-NEXT: // in Loop: Header=BB0_13 Depth=1
+; CHECK-NEXT: and.b32 %r23, %r22, %r2;
+; CHECK-NEXT: setp.ne.s32 %p12, %r68, %r23;
+; CHECK-NEXT: mov.b32 %r68, %r23;
+; CHECK-NEXT: @%p12 bra $L__BB0_13;
+; CHECK-NEXT: $L__BB0_15: // %partword.cmpxchg.end
+; CHECK-NEXT: fence.acq_rel.sys;
+; CHECK-NEXT: selp.u32 %r61, 1, 0, %p1;
+; CHECK-NEXT: selp.u32 %r62, 1, 0, %p2;
+; CHECK-NEXT: add.s32 %r63, %r61, %r62;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r63;
+; CHECK-NEXT: ret;
+entry:
+ %0 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
+ %1 = xor i32 %0, 1
+ %2 = tail call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %ptr)
+ %3 = tail call ptr @llvm.nvvm.mapa(ptr %ptr, i32 %1)
+ %4 = tail call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %3)
+ %dsmem_ptr = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %smem_ptr, i32 %1)
+
+ ; Floating point atomic operations
+ %5 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, half 1.000000e+00 seq_cst
+ %6 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, bfloat 1.000000e+00 seq_cst
+ %7 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, float 1.000000e+00 seq_cst
+ %8 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, double 1.000000e+00 seq_cst
+
+ ; Integer add operations
+ %9 = atomicrmw add ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %10 = atomicrmw add ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Exchange operations
+ %11 = atomicrmw xchg ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %12 = atomicrmw xchg ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Min operations (signed and unsigned)
+ %13 = atomicrmw min ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %14 = atomicrmw min ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %15 = atomicrmw umin ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %16 = atomicrmw umin ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Max operations (signed and unsigned)
+ %17 = atomicrmw max ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %18 = atomicrmw max ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %19 = atomicrmw umax ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %20 = atomicrmw umax ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Inc/Dec operations (32-bit only)
+ %21 = atomicrmw uinc_wrap ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %22 = atomicrmw udec_wrap ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+
+ ; Bitwise operations
+ %23 = atomicrmw and ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %24 = atomicrmw and ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %25 = atomicrmw or ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %26 = atomicrmw or ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %27 = atomicrmw xor ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %28 = atomicrmw xor ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Compare-exchange operation - all memory ordering combinations for 32-bit
+ %29 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 monotonic monotonic
+ %30 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acquire monotonic
+ %31 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acquire acquire
+ %32 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 release monotonic
+ %33 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acq_rel monotonic
+ %34 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acq_rel acquire
+ %35 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst monotonic
+ %36 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst acquire
+ %37 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst seq_cst
+
+ ; Compare-exchange operation - all memory ordering combinations for 64-bit
+ %38 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 monotonic monotonic
+ %39 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acquire monotonic
+ %40 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acquire acquire
+ %41 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 release monotonic
+ %42 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acq_rel monotonic
+ %43 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acq_rel acquire
+ %44 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst monotonic
+ %45 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst acquire
+ %46 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst seq_cst
+
+ ; Compare-exchange operation - 16-bit
+ %47 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 monotonic monotonic
+ %48 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 acquire acquire
+ %49 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 release monotonic
+ %50 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 acq_rel acquire
+ %51 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 seq_cst seq_cst
+
+ ; Return value preserves the isspacep test results
+ %52 = zext i1 %2 to i32
+ %53 = zext i1 %4 to i32
+ %ret = add i32 %52, %53
+ ret i32 %ret
+}
diff --git a/llvm/test/CodeGen/NVPTX/nvptx-aa.ll b/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
index 074e741dc3e94..c1071f4909986 100644
--- a/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
+++ b/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
@@ -18,25 +18,33 @@ target triple = "nvptx64-nvidia-cuda"
; CHECK-ALIAS: NoAlias: i8 addrspace(1)* %global, i8 addrspace(5)* %local
; CHECK-ALIAS: NoAlias: i8 addrspace(5)* %local, i8 addrspace(3)* %shared
; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(5)* %local
+; CHECK-ALIAS: MayAlias: i8 addrspace(7)* %dshared, i8* %gen
+; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(1)* %global
+; CHECK-ALIAS: MayAlias: i8 addrspace(7)* %dshared, i8 addrspace(3)* %shared
+; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(7)* %dshared
+; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(5)* %local
; CHECK-ALIAS: MayAlias: i8* %gen, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(1)* %global, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(101)* %param, i8 addrspace(3)* %shared
; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(5)* %local, i8 addrspace(101)* %param
+; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(101)* %param
-define i8 @test_alias(ptr %gen, ptr addrspace(1) %global, ptr addrspace(3) %shared, ptr addrspace(4) %const, ptr addrspace(5) %local) {
+define i8 @test_alias(ptr %gen, ptr addrspace(1) %global, ptr addrspace(3) %shared, ptr addrspace(4) %const, ptr addrspace(5) %local, ptr addrspace(7) %dshared) {
%param = addrspacecast ptr %gen to ptr addrspace(101)
%v1 = load i8, ptr %gen
%v2 = load i8, ptr addrspace(1) %global
%v3 = load i8, ptr addrspace(3) %shared
%v4 = load i8, ptr addrspace(4) %const
%v5 = load i8, ptr addrspace(5) %local
- %v6 = load i8, ptr addrspace(101) %param
+ %v6 = load i8, ptr addrspace(7) %dshared
+ %v7 = load i8, ptr addrspace(101) %param
%res1 = add i8 %v1, %v2
%res2 = add i8 %res1, %v3
%res3 = add i8 %res2, %v4
%res4 = add i8 %res3, %v5
%res5 = add i8 %res4, %v6
+ %res6 = add i8 %res4, %v7
ret i8 %res5
}
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
index a9270c6f52344..fceaaa36cf8e8 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
@@ -43,7 +43,10 @@ enum NVVMMemorySpace {
/// Tensor memory space identifier.
/// Tensor memory is available only in arch-accelerated
/// variants from sm100 onwards.
- kTensorMemorySpace = 6
+ kTensorMemorySpace = 6,
+ /// Distributed shared memory space identifier.
+ /// Distributed shared memory is available only in sm80+.
+ kDSharedMemorySpace = 7,
};
/// Return the element type and number of elements associated with a wmma matrix
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 0a6e66919f021..9f66e77085ce7 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -25,6 +25,7 @@ def LLVM_PointerGeneric : LLVM_PointerInAddressSpace<0>;
def LLVM_PointerGlobal : LLVM_PointerInAddressSpace<1>;
def LLVM_PointerShared : LLVM_PointerInAddressSpace<3>;
def LLVM_PointerTensor : LLVM_PointerInAddressSpace<6>;
+def LLVM_PointerDShared : LLVM_PointerInAddressSpace<7>;
//===----------------------------------------------------------------------===//
// NVVM dialect definitions
@@ -2252,7 +2253,7 @@ def NVVM_CpAsyncBulkGlobalToSharedClusterOp :
}];
let arguments = (ins
- LLVM_PointerShared:$dstMem,
+ LLVM_PointerDShared:$dstMem,
LLVM_PointerGlobal:$srcMem,
LLVM_PointerShared:$mbar,
I32:$size,
@@ -2309,7 +2310,7 @@ def NVVM_CpAsyncBulkSharedCTAToSharedClusterOp :
}];
let arguments = (ins
- LLVM_PointerShared:$dstMem,
+ LLVM_PointerDShared:$dstMem,
LLVM_PointerShared:$srcMem,
LLVM_PointerShared:$mbar,
I32:$size);
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir
index 0e3f98a134491..39b703d9a9677 100644
--- a/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir
@@ -1,25 +1,25 @@
// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
// CHECK-LABEL: @llvm_nvvm_cp_async_bulk_global_to_shared_cluster
-llvm.func @llvm_nvvm_cp_async_bulk_global_to_shared_cluster(%dst : !llvm.ptr<3>, %src : !llvm.ptr<1>, %mbar : !llvm.ptr<3>, %size : i32, %mc : i16, %ch : i64) {
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST:.*]], ptr addrspace(3) %[[MBAR:.*]], ptr addrspace(1) %[[SRC:.*]], i32 %[[SIZE:.*]], i16 0, i64 0, i1 false, i1 false)
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 0, i64 %[[CH:.*]], i1 false, i1 true)
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC:.*]], i64 0, i1 true, i1 false)
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC]], i64 %[[CH]], i1 true, i1 true)
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size : !llvm.ptr<3>, !llvm.ptr<1>
+llvm.func @llvm_nvvm_cp_async_bulk_global_to_shared_cluster(%dst : !llvm.ptr<7>, %src : !llvm.ptr<1>, %mbar : !llvm.ptr<3>, %size : i32, %mc : i16, %ch : i64) {
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST:.*]], ptr addrspace(3) %[[MBAR:.*]], ptr addrspace(1) %[[SRC:.*]], i32 %[[SIZE:.*]], i16 0, i64 0, i1 false, i1 false)
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 0, i64 %[[CH:.*]], i1 false, i1 true)
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC:.*]], i64 0, i1 true, i1 false)
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC]], i64 %[[CH]], i1 true, i1 true)
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size : !llvm.ptr<7>, !llvm.ptr<1>
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size l2_cache_hint = %ch : !llvm.ptr<3>, !llvm.ptr<1>
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size l2_cache_hint = %ch : !llvm.ptr<7>, !llvm.ptr<1>
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc : !llvm.ptr<3>, !llvm.ptr<1>
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc : !llvm.ptr<7>, !llvm.ptr<1>
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc l2_cache_hint = %ch : !llvm.ptr<3>, !llvm.ptr<1>
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc l2_cache_hint = %ch : !llvm.ptr<7>, !llvm.ptr<1>
llvm.return
}
// CHECK-LABEL: @llvm_nvvm_cp_async_bulk_shared_cta_to_shared_cluster
-llvm.func @llvm_nvvm_cp_async_bulk_shared_cta_to_shared_cluster(%dst : !llvm.ptr<3>, %src : !llvm.ptr<3>, %mbar : !llvm.ptr<3>, %size : i32) {
- // CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %0, ptr addrspace(3) %2, ptr addrspace(3) %1, i32 %3)
- nvvm.cp.async.bulk.shared.cluster.shared.cta %dst, %src, %mbar, %size : !llvm.ptr<3>, !llvm.ptr<3>
+llvm.func @llvm_nvvm_cp_async_bulk_shared_cta_to_shared_cluster(%dst : !llvm.ptr<7>, %src : !llvm.ptr<3>, %mbar : !llvm.ptr<3>, %size : i32) {
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %0, ptr addrspace(3) %2, ptr addrspace(3) %1, i32 %3)
+ nvvm.cp.async.bulk.shared.cluster.shared.cta %dst, %src, %mbar, %size : !llvm.ptr<7>, !llvm.ptr<3>
llvm.return
}
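
[Editor's note, not part of the patch] At this point in the series, the IR-level surface can be summarized with a small illustrative module; the function and value names below are invented for the example and are not taken from the patch. The destination of the cluster copies is now an addrspace(7) shared::cluster pointer, typically produced by llvm.nvvm.mapa.shared.cluster, while the mbarrier stays an addrspace(3) shared::cta pointer.

declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)
declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)

define void @copy_to_remote_cta(ptr addrspace(1) %src, ptr addrspace(3) %dst_cta, ptr addrspace(3) %mbar, i32 %size, i32 %rank) {
  ; Map the local CTA's shared address into the shared::cluster window of CTA %rank.
  %dst = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %dst_cta, i32 %rank)
  ; Bulk-copy %size bytes from global memory into that CTA's shared memory;
  ; multicast mask and cache hint are unused here (both flags false).
  call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
  ret void
}

This is the same shape as the cp_async_bulk_g2s test updated above, distilled to a single call.
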
>From 327c0c8965e29379938de68d5675e1a319edcce2 Mon Sep 17 00:00:00 2001
From: Modi Mo <mmo at nvidia.com>
Date: Fri, 11 Apr 2025 19:30:32 -0700
Subject: [PATCH 2/9] change to shared_cluster, update tests
---
llvm/include/llvm/IR/IntrinsicsNVVM.td | 54 +++++++--------
llvm/include/llvm/Support/NVPTXAddrSpace.h | 2 +-
llvm/lib/IR/AutoUpgrade.cpp | 8 +--
.../NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp | 2 +-
llvm/lib/Target/NVPTX/NVPTX.h | 2 +-
llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp | 5 +-
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 16 +++--
llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 17 +++--
llvm/lib/Target/NVPTX/NVPTXUtilities.h | 2 +-
.../Assembler/auto_upgrade_nvvm_intrinsics.ll | 68 +++++++++----------
.../NVPTX/distributed-shared-cluster.ll | 4 +-
llvm/test/CodeGen/NVPTX/nvptx-aa.ll | 16 ++---
.../include/mlir/Dialect/LLVMIR/NVVMDialect.h | 2 +-
mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td | 6 +-
14 files changed, 106 insertions(+), 98 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index e415fcd8707de..94367be6ee8e2 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -127,11 +127,11 @@
// * llvm.nvvm.atomic.load.inc.32 --> atomicrmw uinc_wrap
// * llvm.nvvm.atomic.load.dec.32 --> atomicrmw udec_wrap
-def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
-def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
-def llvm_local_ptr_ty : LLVMQualPointerType<5>; // (local)ptr
-def llvm_tmem_ptr_ty : LLVMQualPointerType<6>; // (tensor memory)ptr
-def llvm_dshared_ptr_ty : LLVMQualPointerType<7>; // (dshared)ptr
+def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
+def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
+def llvm_local_ptr_ty : LLVMQualPointerType<5>; // (local)ptr
+def llvm_tmem_ptr_ty : LLVMQualPointerType<6>; // (tensor memory)ptr
+def llvm_shared_cluster_ptr_ty : LLVMQualPointerType<7>; // (shared_cluster)ptr
//
// MISC
@@ -692,15 +692,15 @@ class CP_ASYNC_BULK_TENSOR_G2S_INTR<int dim, string mode> {
list<LLVMType> Im2ColOffsetsTy = !listsplat(llvm_i16_ty, NumIm2ColOffsets);
list<LLVMType> TensorDimsTy = !listsplat(llvm_i32_ty, dim);
list<LLVMType> ArgsTy = !listconcat(
- [llvm_dshared_ptr_ty, // dst_smem_ptr
- llvm_shared_ptr_ty, // mbarrier_smem_ptr
- llvm_ptr_ty], // tensormap_ptr
- TensorDimsTy, // actual tensor dims
- Im2ColOffsetsTy, // im2col offsets
- [llvm_i16_ty, // cta_mask
- llvm_i64_ty, // cache_hint
- llvm_i1_ty, // Flag for cta_mask
- llvm_i1_ty] // Flag for cache_hint
+ [llvm_shared_cluster_ptr_ty, // dst_shared_cluster_ptr
+ llvm_shared_ptr_ty, // mbarrier_smem_ptr
+ llvm_ptr_ty], // tensormap_ptr
+ TensorDimsTy, // actual tensor dims
+ Im2ColOffsetsTy, // im2col offsets
+ [llvm_i16_ty, // cta_mask
+ llvm_i64_ty, // cache_hint
+ llvm_i1_ty, // Flag for cta_mask
+ llvm_i1_ty] // Flag for cache_hint
);
int TempFlagsStartIdx = !add(dim, 5);
@@ -5135,7 +5135,7 @@ def int_nvvm_mapa
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa">;
def int_nvvm_mapa_shared_cluster
- : DefaultAttrsIntrinsic<[llvm_dshared_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
+ : DefaultAttrsIntrinsic<[llvm_shared_cluster_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa.shared.cluster">;
def int_nvvm_getctarank
@@ -5235,14 +5235,14 @@ def int_nvvm_discard_L2 : DefaultAttrsIntrinsic<[],
// From Global to Shared Cluster
def int_nvvm_cp_async_bulk_global_to_shared_cluster
: DefaultAttrsIntrinsic<[],
- [llvm_dshared_ptr_ty, // dst_dsmem_ptr
- llvm_shared_ptr_ty, // mbarrier_ptr
- llvm_global_ptr_ty, // src_gmem_ptr
- llvm_i32_ty, // copy_size
- llvm_i16_ty, // cta_mask
- llvm_i64_ty, // cache_hint
- llvm_i1_ty, // Flag for cta_mask
- llvm_i1_ty], // Flag for cache_hint
+ [llvm_shared_cluster_ptr_ty, // dst_shared_cluster_ptr
+ llvm_shared_ptr_ty, // mbarrier_ptr
+ llvm_global_ptr_ty, // src_gmem_ptr
+ llvm_i32_ty, // copy_size
+ llvm_i16_ty, // cta_mask
+ llvm_i64_ty, // cache_hint
+ llvm_i1_ty, // Flag for cta_mask
+ llvm_i1_ty], // Flag for cache_hint
[IntrConvergent, IntrArgMemOnly,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<2>>,
NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
@@ -5252,10 +5252,10 @@ def int_nvvm_cp_async_bulk_global_to_shared_cluster
// From Shared CTA to Shared Cluster
def int_nvvm_cp_async_bulk_shared_cta_to_cluster
: DefaultAttrsIntrinsic<[],
- [llvm_dshared_ptr_ty, // dst_dsmem_ptr
- llvm_shared_ptr_ty, // mbarrier_ptr
- llvm_shared_ptr_ty, // src_smem_ptr
- llvm_i32_ty], // copy_size
+ [llvm_shared_cluster_ptr_ty, // dst_shared_cluster_ptr
+ llvm_shared_ptr_ty, // mbarrier_ptr
+ llvm_shared_ptr_ty, // src_smem_ptr
+ llvm_i32_ty], // copy_size
[IntrConvergent, IntrArgMemOnly,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<2>>,
NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
diff --git a/llvm/include/llvm/Support/NVPTXAddrSpace.h b/llvm/include/llvm/Support/NVPTXAddrSpace.h
index a3eac31f2e5e9..04f74c34787cc 100644
--- a/llvm/include/llvm/Support/NVPTXAddrSpace.h
+++ b/llvm/include/llvm/Support/NVPTXAddrSpace.h
@@ -25,7 +25,7 @@ enum AddressSpace : unsigned {
ADDRESS_SPACE_CONST = 4,
ADDRESS_SPACE_LOCAL = 5,
ADDRESS_SPACE_TENSOR = 6,
- ADDRESS_SPACE_DSHARED = 7,
+ ADDRESS_SPACE_SHARED_CLUSTER = 7,
ADDRESS_SPACE_PARAM = 101,
};
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index f66935ad4515d..1aac285b58e5f 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -939,8 +939,8 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
return false; // No other 'arm.*', 'aarch64.*'.
}
-static Intrinsic::ID shouldUpgradeNVPTXDSharedIntrinsic(Function *F,
- StringRef Name) {
+static Intrinsic::ID shouldUpgradeNVPTXSharedClusterIntrinsic(Function *F,
+ StringRef Name) {
if (Name.consume_front("mapa.shared.cluster"))
if (F->getReturnType()->getPointerAddressSpace() ==
NVPTXAS::ADDRESS_SPACE_SHARED)
@@ -1321,7 +1321,7 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
}
// Upgrade Distributed Shared Memory Intrinsics
- Intrinsic::ID IID = shouldUpgradeNVPTXDSharedIntrinsic(F, Name);
+ Intrinsic::ID IID = shouldUpgradeNVPTXSharedClusterIntrinsic(F, Name);
if (IID != Intrinsic::not_intrinsic) {
rename(F);
NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID);
@@ -4797,7 +4797,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
Args[0] = Builder.CreateAddrSpaceCast(
Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_GENERIC));
Args[0] = Builder.CreateAddrSpaceCast(
- Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_DSHARED));
+ Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER));
NewCall = Builder.CreateCall(NewFn, Args);
NewCall->takeName(CI);
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
index 8dbc739e3bef9..0b137250e4e59 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
@@ -285,7 +285,7 @@ void NVPTXInstPrinter::printLdStCode(const MCInst *MI, int OpNum,
case NVPTX::AddressSpace::Global:
case NVPTX::AddressSpace::Const:
case NVPTX::AddressSpace::Shared:
- case NVPTX::AddressSpace::Dshared:
+ case NVPTX::AddressSpace::SharedCluster:
case NVPTX::AddressSpace::Param:
case NVPTX::AddressSpace::Local:
O << "." << A;
diff --git a/llvm/lib/Target/NVPTX/NVPTX.h b/llvm/lib/Target/NVPTX/NVPTX.h
index c20c522f36bd3..cf21ad991ccdf 100644
--- a/llvm/lib/Target/NVPTX/NVPTX.h
+++ b/llvm/lib/Target/NVPTX/NVPTX.h
@@ -176,7 +176,7 @@ enum AddressSpace : AddressSpaceUnderlyingType {
Shared = 3,
Const = 4,
Local = 5,
- Dshared = 7,
+ SharedCluster = 7,
// NVPTX Backend Private:
Param = 101
diff --git a/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp b/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
index 60bc22f5f589c..a579783802aa2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
@@ -87,8 +87,9 @@ static AliasResult::Kind getAliasResult(unsigned AS1, unsigned AS2) {
// rules once it is added.
// Distributed shared memory aliases with shared memory.
- if (((AS1 == ADDRESS_SPACE_SHARED) && (AS2 == ADDRESS_SPACE_DSHARED)) ||
- ((AS1 == ADDRESS_SPACE_DSHARED) && (AS2 == ADDRESS_SPACE_SHARED)))
+ if (((AS1 == ADDRESS_SPACE_SHARED) &&
+ (AS2 == ADDRESS_SPACE_SHARED_CLUSTER)) ||
+ ((AS1 == ADDRESS_SPACE_SHARED_CLUSTER) && (AS2 == ADDRESS_SPACE_SHARED)))
return AliasResult::MayAlias;
return (AS1 == AS2 ? AliasResult::MayAlias : AliasResult::NoAlias);
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index bc07999e142c6..bbaa1dfe3bc0c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -513,8 +513,8 @@ static std::optional<unsigned> convertAS(unsigned AS) {
return NVPTX::AddressSpace::Global;
case llvm::ADDRESS_SPACE_SHARED:
return NVPTX::AddressSpace::Shared;
- case llvm::ADDRESS_SPACE_DSHARED:
- return NVPTX::AddressSpace::Dshared;
+ case llvm::ADDRESS_SPACE_SHARED_CLUSTER:
+ return NVPTX::AddressSpace::SharedCluster;
case llvm::ADDRESS_SPACE_GENERIC:
return NVPTX::AddressSpace::Generic;
case llvm::ADDRESS_SPACE_PARAM:
@@ -661,7 +661,7 @@ getOperationOrderings(MemSDNode *N, const NVPTXSubtarget *Subtarget) {
(CodeAddrSpace == NVPTX::AddressSpace::Generic ||
CodeAddrSpace == NVPTX::AddressSpace::Global ||
CodeAddrSpace == NVPTX::AddressSpace::Shared ||
- CodeAddrSpace == NVPTX::AddressSpace::Dshared);
+ CodeAddrSpace == NVPTX::AddressSpace::SharedCluster);
if (!AddrGenericOrGlobalOrShared)
return NVPTX::Ordering::NotAtomic;
@@ -982,8 +982,9 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
case ADDRESS_SPACE_SHARED:
Opc = TM.is64Bit() ? NVPTX::cvta_shared_64 : NVPTX::cvta_shared;
break;
- case ADDRESS_SPACE_DSHARED:
- Opc = TM.is64Bit() ? NVPTX::cvta_dshared_64 : NVPTX::cvta_dshared;
+ case ADDRESS_SPACE_SHARED_CLUSTER:
+ Opc = TM.is64Bit() ? NVPTX::cvta_shared_cluster_64
+ : NVPTX::cvta_shared_cluster;
break;
case ADDRESS_SPACE_CONST:
Opc = TM.is64Bit() ? NVPTX::cvta_const_64 : NVPTX::cvta_const;
@@ -1010,8 +1011,9 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
case ADDRESS_SPACE_SHARED:
Opc = TM.is64Bit() ? NVPTX::cvta_to_shared_64 : NVPTX::cvta_to_shared;
break;
- case ADDRESS_SPACE_DSHARED:
- Opc = TM.is64Bit() ? NVPTX::cvta_to_dshared_64 : NVPTX::cvta_to_dshared;
+ case ADDRESS_SPACE_SHARED_CLUSTER:
+ Opc = TM.is64Bit() ? NVPTX::cvta_to_shared_cluster_64
+ : NVPTX::cvta_to_shared_cluster;
break;
case ADDRESS_SPACE_CONST:
Opc = TM.is64Bit() ? NVPTX::cvta_to_const_64 : NVPTX::cvta_to_const;
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index e66ad45304676..e39ccfb14a5fc 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -33,8 +33,8 @@ def AS_match {
code shared = [{
return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_SHARED);
}];
- code dshared = [{
- return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_DSHARED);
+ code shared_cluster = [{
+ return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_SHARED_CLUSTER);
}];
code global = [{
return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_GLOBAL);
@@ -2042,8 +2042,8 @@ class ATOMIC_GLOBAL_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.global>;
class ATOMIC_SHARED_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.shared>;
-class ATOMIC_DSHARED_CHK <dag frag>
- : PatFrag<!setdagop(frag, ops), frag, AS_match.dshared>;
+class ATOMIC_SHARED_CLUSTER_CHK <dag frag>
+ : PatFrag<!setdagop(frag, ops), frag, AS_match.shared_cluster>;
class ATOMIC_GENERIC_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.generic>;
@@ -2098,7 +2098,7 @@ multiclass F_ATOMIC_2_AS<RegTyInfo t, SDPatternOperator frag, string op_str, lis
defvar frag_pat = (frag node:$a, node:$b);
defm _G : F_ATOMIC_2<t, "", ".global", op_str, ATOMIC_GLOBAL_CHK<frag_pat>, preds>;
defm _S : F_ATOMIC_2<t, "", ".shared", op_str, ATOMIC_SHARED_CHK<frag_pat>, preds>;
- defm _DS : F_ATOMIC_2<t, "", ".shared::cluster", op_str, ATOMIC_DSHARED_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
+ defm _S_C : F_ATOMIC_2<t, "", ".shared::cluster", op_str, ATOMIC_SHARED_CLUSTER_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
defm _GEN : F_ATOMIC_2<t, "", "", op_str, ATOMIC_GENERIC_CHK<frag_pat>, preds>;
}
@@ -2106,7 +2106,7 @@ multiclass F_ATOMIC_3_AS<RegTyInfo t, SDPatternOperator frag, string sem_str, st
defvar frag_pat = (frag node:$a, node:$b, node:$c);
defm _G : F_ATOMIC_3<t, sem_str, ".global", op_str, ATOMIC_GLOBAL_CHK<frag_pat>, preds>;
defm _S : F_ATOMIC_3<t, sem_str, ".shared", op_str, ATOMIC_SHARED_CHK<frag_pat>, preds>;
- defm _DS : F_ATOMIC_3<t, sem_str, ".shared::cluster", op_str, ATOMIC_DSHARED_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
+ defm _S_C : F_ATOMIC_3<t, sem_str, ".shared::cluster", op_str, ATOMIC_SHARED_CLUSTER_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
defm _GEN : F_ATOMIC_3<t, sem_str, "", op_str, ATOMIC_GENERIC_CHK<frag_pat>, preds>;
}
@@ -2406,6 +2406,11 @@ foreach space = ["local", "shared", "global", "const", "param"] in {
defm cvta_to_#space : G_TO_NG<space>;
}
+foreach space = ["shared::cluster"] in {
+ defm cvta_#space : NG_TO_G<space>;
+ defm cvta_to_#space : G_TO_NG<space>;
+}
+
def : Pat<(int_nvvm_ptr_param_to_gen i32:$src),
(cvta_param $src)>;
diff --git a/llvm/lib/Target/NVPTX/NVPTXUtilities.h b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
index 90cf35b1d8d77..a1b4a0e5e7471 100644
--- a/llvm/lib/Target/NVPTX/NVPTXUtilities.h
+++ b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
@@ -168,7 +168,7 @@ inline std::string AddressSpaceToString(AddressSpace A) {
return "const";
case AddressSpace::Shared:
return "shared";
- case AddressSpace::Dshared:
+ case AddressSpace::SharedCluster:
return "shared::cluster";
case AddressSpace::Param:
return "param";
diff --git a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
index 7792a4cc92f38..a361319d11473 100644
--- a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
+++ b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
@@ -59,18 +59,20 @@ declare float @llvm.nvvm.ldg.global.f.f32.p0(ptr, i32)
declare i32 @llvm.nvvm.atomic.load.inc.32(ptr, i32)
declare i32 @llvm.nvvm.atomic.load.dec.32(ptr, i32)
-declare ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i64)
+declare ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)
declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(3), i32)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
; CHECK-LABEL: @simple_upgrade
define void @simple_upgrade(i32 %a, i64 %b, i16 %c) {
@@ -268,46 +270,44 @@ define i32 @atomics(ptr %p0, i32 %a) {
}
; CHECK-LABEL: @nvvm_shared_cluster_intrinsics
-define void @nvvm_shared_cluster_intrinsics(ptr addrspace(3) %p0, i64 %offset) {
-; CHECK: %[[ASC1:.*]] = addrspacecast ptr addrspace(3) %p0 to ptr addrspace(7)
-; CHECK: %[[CALL:.*]] = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(7) %[[ASC1]], i64 %offset)
-; CHECK: %[[ASC2:.*]] = addrspacecast ptr addrspace(7) %[[CALL]] to ptr addrspace(3)
- %r = call ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p0, i64 %offset)
+define void @nvvm_shared_cluster_intrinsics(ptr addrspace(3) %p0, i32 %offset) {
+; CHECK: %r = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p0, i32 %offset)
+ %r = call ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p0, i32 %offset)
ret void
}
; CHECK-LABEL: @nvvm_cp_async_bulk_intrinsics
define void @nvvm_cp_async_bulk_intrinsics(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, ptr addrspace(3) %src_shared, i32 %size) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
ret void
}
; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_im2col
-define void @nvvm_cp_async_bulk_tensor_g2s_im2col(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i16 0, i64 0, i1 false, i1 false)
+define void @nvvm_cp_async_bulk_tensor_g2s_im2col(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch) {
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %6, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 0, i1 0)
ret void
}
; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_tile
-define void @nvvm_cp_async_bulk_tensor_g2s_tile(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i16 0, i64 0, i1 false, i1 false)
+define void @nvvm_cp_async_bulk_tensor_g2s_tile(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch) {
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %6, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %8, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %10, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 0, i1 0)
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
index f499500a64ebf..aaad2b5dc880a 100644
--- a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
+++ b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
@@ -170,8 +170,8 @@ define i32 @test_distributed_shared_cluster(ptr %ptr, ptr addrspace(3) %smem_ptr
; CHECK-NEXT: @%p12 bra $L__BB0_13;
; CHECK-NEXT: $L__BB0_15: // %partword.cmpxchg.end
; CHECK-NEXT: fence.acq_rel.sys;
-; CHECK-NEXT: selp.u32 %r61, 1, 0, %p1;
-; CHECK-NEXT: selp.u32 %r62, 1, 0, %p2;
+; CHECK-NEXT: selp.b32 %r61, 1, 0, %p1;
+; CHECK-NEXT: selp.b32 %r62, 1, 0, %p2;
; CHECK-NEXT: add.s32 %r63, %r61, %r62;
; CHECK-NEXT: st.param.b32 [func_retval0], %r63;
; CHECK-NEXT: ret;
diff --git a/llvm/test/CodeGen/NVPTX/nvptx-aa.ll b/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
index c1071f4909986..924220326c341 100644
--- a/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
+++ b/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
@@ -18,26 +18,26 @@ target triple = "nvptx64-nvidia-cuda"
; CHECK-ALIAS: NoAlias: i8 addrspace(1)* %global, i8 addrspace(5)* %local
; CHECK-ALIAS: NoAlias: i8 addrspace(5)* %local, i8 addrspace(3)* %shared
; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(5)* %local
-; CHECK-ALIAS: MayAlias: i8 addrspace(7)* %dshared, i8* %gen
-; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(1)* %global
-; CHECK-ALIAS: MayAlias: i8 addrspace(7)* %dshared, i8 addrspace(3)* %shared
-; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(7)* %dshared
-; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(5)* %local
+; CHECK-ALIAS: MayAlias: i8* %gen, i8 addrspace(7)* %shared_cluster
+; CHECK-ALIAS: NoAlias: i8 addrspace(1)* %global, i8 addrspace(7)* %shared_cluster
+; CHECK-ALIAS: MayAlias: i8 addrspace(3)* %shared, i8 addrspace(7)* %shared_cluster
+; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(7)* %shared_cluster
+; CHECK-ALIAS: NoAlias: i8 addrspace(5)* %local, i8 addrspace(7)* %shared_cluster
; CHECK-ALIAS: MayAlias: i8* %gen, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(1)* %global, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(101)* %param, i8 addrspace(3)* %shared
; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(5)* %local, i8 addrspace(101)* %param
-; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(101)* %param
+; CHECK-ALIAS: NoAlias: i8 addrspace(101)* %param, i8 addrspace(7)* %shared_cluster
-define i8 @test_alias(ptr %gen, ptr addrspace(1) %global, ptr addrspace(3) %shared, ptr addrspace(4) %const, ptr addrspace(5) %local, ptr addrspace(7) %dshared) {
+define i8 @test_alias(ptr %gen, ptr addrspace(1) %global, ptr addrspace(3) %shared, ptr addrspace(4) %const, ptr addrspace(5) %local, ptr addrspace(7) %shared_cluster) {
%param = addrspacecast ptr %gen to ptr addrspace(101)
%v1 = load i8, ptr %gen
%v2 = load i8, ptr addrspace(1) %global
%v3 = load i8, ptr addrspace(3) %shared
%v4 = load i8, ptr addrspace(4) %const
%v5 = load i8, ptr addrspace(5) %local
- %v6 = load i8, ptr addrspace(7) %dshared
+ %v6 = load i8, ptr addrspace(7) %shared_cluster
%v7 = load i8, ptr addrspace(101) %param
%res1 = add i8 %v1, %v2
%res2 = add i8 %res1, %v3
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
index fceaaa36cf8e8..cd6f3af2e9241 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
@@ -46,7 +46,7 @@ enum NVVMMemorySpace {
kTensorMemorySpace = 6,
/// Distributed shared memory space identifier.
/// Distributed shared memory is available only in sm80+.
- kDSharedMemorySpace = 7,
+ kSharedClusterMemorySpace = 7,
};
/// Return the element type and number of elements associated with a wmma matrix
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 9f66e77085ce7..5f41d428479b8 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -25,7 +25,7 @@ def LLVM_PointerGeneric : LLVM_PointerInAddressSpace<0>;
def LLVM_PointerGlobal : LLVM_PointerInAddressSpace<1>;
def LLVM_PointerShared : LLVM_PointerInAddressSpace<3>;
def LLVM_PointerTensor : LLVM_PointerInAddressSpace<6>;
-def LLVM_PointerDShared : LLVM_PointerInAddressSpace<7>;
+def LLVM_PointerSharedCluster : LLVM_PointerInAddressSpace<7>;
//===----------------------------------------------------------------------===//
// NVVM dialect definitions
@@ -2253,7 +2253,7 @@ def NVVM_CpAsyncBulkGlobalToSharedClusterOp :
}];
let arguments = (ins
- LLVM_PointerDShared:$dstMem,
+ LLVM_PointerSharedCluster:$dstMem,
LLVM_PointerGlobal:$srcMem,
LLVM_PointerShared:$mbar,
I32:$size,
@@ -2310,7 +2310,7 @@ def NVVM_CpAsyncBulkSharedCTAToSharedClusterOp :
}];
let arguments = (ins
- LLVM_PointerDShared:$dstMem,
+ LLVM_PointerSharedCluster:$dstMem,
LLVM_PointerShared:$srcMem,
LLVM_PointerShared:$mbar,
I32:$size);
>From 2318ddd9f0f6db57217a4c88c969f9b45db571d8 Mon Sep 17 00:00:00 2001
From: Modi Mo <mmo at nvidia.com>
Date: Tue, 15 Apr 2025 18:27:36 -0700
Subject: [PATCH 3/9] review feedback: split and add tests, remove 32-bit
cvta.*shared::cluster, allow isel to lower shared::cta <-> shared::cluster
addrspacecasts
---
llvm/lib/IR/AutoUpgrade.cpp | 6 +-
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 12 +-
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 36 +-
llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 26 +-
.../Assembler/auto_upgrade_nvvm_intrinsics.ll | 20 +-
.../test/CodeGen/NVPTX/addrspacecast-ptx64.ll | 48 ++
.../NVPTX/distributed-shared-cluster.ll | 479 ++++++++++--------
.../include/mlir/Dialect/LLVMIR/NVVMDialect.h | 2 +-
8 files changed, 396 insertions(+), 233 deletions(-)
create mode 100644 llvm/test/CodeGen/NVPTX/addrspacecast-ptx64.ll
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 1aac285b58e5f..01e9b61b38944 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -4773,8 +4773,6 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
NewCall =
Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1)});
Value *Res = NewCall;
- Res = Builder.CreateAddrSpaceCast(
- Res, Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_GENERIC));
Res = Builder.CreateAddrSpaceCast(
Res, Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_SHARED));
NewCall->takeName(CI);
@@ -4792,10 +4790,8 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_3d:
case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_4d:
case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_5d: {
-
+ // Create a new call with the correct address space.
SmallVector<Value *, 4> Args(CI->args());
- Args[0] = Builder.CreateAddrSpaceCast(
- Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_GENERIC));
Args[0] = Builder.CreateAddrSpaceCast(
Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER));
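
[Editor's note, not part of the patch] With the intermediate generic-space casts dropped, the auto-upgrade path amounts to roughly the following rewrite; the IR below is illustrative and the value names are invented for the example.

; Before upgrade (old bitcode, written against the addrspace(3) signature):
;   %r = call ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p, i32 %rank)
; After upgrade, the module reads roughly as follows:
declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)

define ptr addrspace(3) @upgraded_mapa(ptr addrspace(3) %p, i32 %rank) {
  ; The call now targets the addrspace(7) declaration...
  %r.cluster = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p, i32 %rank)
  ; ...and the result is cast back to addrspace(3) for the old users.
  %r = addrspacecast ptr addrspace(7) %r.cluster to ptr addrspace(3)
  ret ptr addrspace(3) %r
}

For the cp.async.bulk.* and cp.async.bulk.tensor.g2s.* intrinsics the cast goes the other way: the old addrspace(3) destination operand is addrspacecast to addrspace(7) immediately before the upgraded call, which matches the CHECK lines in auto_upgrade_nvvm_intrinsics.ll above.
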
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index bbaa1dfe3bc0c..032975ed663e9 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -983,8 +983,10 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
Opc = TM.is64Bit() ? NVPTX::cvta_shared_64 : NVPTX::cvta_shared;
break;
case ADDRESS_SPACE_SHARED_CLUSTER:
- Opc = TM.is64Bit() ? NVPTX::cvta_shared_cluster_64
- : NVPTX::cvta_shared_cluster;
+ if (!TM.is64Bit())
+ report_fatal_error(
+ "Shared cluster address space is only supported in 64-bit mode");
+ Opc = NVPTX::cvta_shared_cluster_64;
break;
case ADDRESS_SPACE_CONST:
Opc = TM.is64Bit() ? NVPTX::cvta_const_64 : NVPTX::cvta_const;
@@ -1012,8 +1014,10 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
Opc = TM.is64Bit() ? NVPTX::cvta_to_shared_64 : NVPTX::cvta_to_shared;
break;
case ADDRESS_SPACE_SHARED_CLUSTER:
- Opc = TM.is64Bit() ? NVPTX::cvta_to_shared_cluster_64
- : NVPTX::cvta_to_shared_cluster;
+ if (!TM.is64Bit())
+ report_fatal_error(
+ "Shared cluster address space is only supported in 64-bit mode");
+ Opc = NVPTX::cvta_to_shared_cluster_64;
break;
case ADDRESS_SPACE_CONST:
Opc = TM.is64Bit() ? NVPTX::cvta_to_const_64 : NVPTX::cvta_to_const;
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 49f4f30096f00..ef768fabdb637 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -3043,8 +3043,42 @@ SDValue NVPTXTargetLowering::LowerADDRSPACECAST(SDValue Op,
unsigned SrcAS = N->getSrcAddressSpace();
unsigned DestAS = N->getDestAddressSpace();
if (SrcAS != llvm::ADDRESS_SPACE_GENERIC &&
- DestAS != llvm::ADDRESS_SPACE_GENERIC)
+ DestAS != llvm::ADDRESS_SPACE_GENERIC) {
+ // Shared and SharedCluster can be converted to each other through generic
+ // space
+ if (SrcAS == llvm::ADDRESS_SPACE_SHARED &&
+ DestAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER) {
+ const MVT GenerictVT =
+ getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_GENERIC);
+ const MVT SharedClusterVT =
+ getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_SHARED_CLUSTER);
+ SDValue GenericConversion =
+ DAG.getAddrSpaceCast(SDLoc(), GenerictVT, Op.getOperand(0),
+ ADDRESS_SPACE_SHARED, ADDRESS_SPACE_GENERIC);
+ SDValue SharedClusterConversion = DAG.getAddrSpaceCast(
+ SDLoc(), SharedClusterVT, GenericConversion, ADDRESS_SPACE_GENERIC,
+ ADDRESS_SPACE_SHARED_CLUSTER);
+ return SharedClusterConversion;
+ }
+
+ if (SrcAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER &&
+ DestAS == llvm::ADDRESS_SPACE_SHARED) {
+ const MVT GenerictVT =
+ getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_GENERIC);
+ const MVT SharedVT =
+ getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_SHARED);
+ SDValue GenericConversion = DAG.getAddrSpaceCast(
+ SDLoc(), GenerictVT, Op.getOperand(0), ADDRESS_SPACE_SHARED_CLUSTER,
+ ADDRESS_SPACE_GENERIC);
+ SDValue SharedConversion =
+ DAG.getAddrSpaceCast(SDLoc(), SharedVT, GenericConversion,
+ ADDRESS_SPACE_GENERIC, ADDRESS_SPACE_SHARED);
+ return SharedConversion;
+ }
+
return DAG.getUNDEF(Op.getValueType());
+ }
+
return Op;
}
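In PTX terms, routing the cast through the generic space means a shared::cta -> shared::cluster cast expands to a cvta/cvta.to pair, as the new addrspacecast test below also checks. A sketch of the 64-bit sequence (register names are illustrative):

  cvta.shared.u64             %rd2, %rd1;  // shared::cta -> generic
  cvta.to.shared::cluster.u64 %rd3, %rd2;  // generic -> shared::cluster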
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index e39ccfb14a5fc..8c542af4dc3e1 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -2098,7 +2098,7 @@ multiclass F_ATOMIC_2_AS<RegTyInfo t, SDPatternOperator frag, string op_str, lis
defvar frag_pat = (frag node:$a, node:$b);
defm _G : F_ATOMIC_2<t, "", ".global", op_str, ATOMIC_GLOBAL_CHK<frag_pat>, preds>;
defm _S : F_ATOMIC_2<t, "", ".shared", op_str, ATOMIC_SHARED_CHK<frag_pat>, preds>;
- defm _S_C : F_ATOMIC_2<t, "", ".shared::cluster", op_str, ATOMIC_SHARED_CLUSTER_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
+ defm _S_C : F_ATOMIC_2<t, "", ".shared::cluster", op_str, ATOMIC_SHARED_CLUSTER_CHK<frag_pat>, !listconcat([hasClusters], preds)>;
defm _GEN : F_ATOMIC_2<t, "", "", op_str, ATOMIC_GENERIC_CHK<frag_pat>, preds>;
}
@@ -2106,7 +2106,7 @@ multiclass F_ATOMIC_3_AS<RegTyInfo t, SDPatternOperator frag, string sem_str, st
defvar frag_pat = (frag node:$a, node:$b, node:$c);
defm _G : F_ATOMIC_3<t, sem_str, ".global", op_str, ATOMIC_GLOBAL_CHK<frag_pat>, preds>;
defm _S : F_ATOMIC_3<t, sem_str, ".shared", op_str, ATOMIC_SHARED_CHK<frag_pat>, preds>;
- defm _S_C : F_ATOMIC_3<t, sem_str, ".shared::cluster", op_str, ATOMIC_SHARED_CLUSTER_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
+ defm _S_C : F_ATOMIC_3<t, sem_str, ".shared::cluster", op_str, ATOMIC_SHARED_CLUSTER_CHK<frag_pat>, !listconcat([hasClusters], preds)>;
defm _GEN : F_ATOMIC_3<t, sem_str, "", op_str, ATOMIC_GENERIC_CHK<frag_pat>, preds>;
}
@@ -2387,18 +2387,28 @@ def INT_PTX_LDG_G_v4i32_ELE : VLDG_G_ELE_V4<"u32", Int32Regs>;
def INT_PTX_LDG_G_v4f32_ELE : VLDG_G_ELE_V4<"f32", Float32Regs>;
-multiclass NG_TO_G<string Str> {
+multiclass NG_TO_G<string Str, list<Predicate> Preds = []> {
def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
- "cvta." # Str # ".u32 \t$result, $src;", []>;
+ "cvta." # Str # ".u32 \t$result, $src;", []>, Requires<Preds>;
def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
- "cvta." # Str # ".u64 \t$result, $src;", []>;
+ "cvta." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
}
-multiclass G_TO_NG<string Str> {
+multiclass NG_TO_G_64<string Str, list<Predicate> Preds = []> {
+ def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ "cvta." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
+}
+
+multiclass G_TO_NG<string Str, list<Predicate> Preds = []> {
def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
- "cvta.to." # Str # ".u32 \t$result, $src;", []>;
+ "cvta.to." # Str # ".u32 \t$result, $src;", []>, Requires<Preds>;
+ def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ "cvta.to." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
+}
+
+multiclass G_TO_NG_64<string Str, list<Predicate> Preds = []> {
def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
- "cvta.to." # Str # ".u64 \t$result, $src;", []>;
+ "cvta.to." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
}
foreach space = ["local", "shared", "global", "const", "param"] in {
diff --git a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
index a361319d11473..e80ef7e9e1e72 100644
--- a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
+++ b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
@@ -278,8 +278,8 @@ define void @nvvm_shared_cluster_intrinsics(ptr addrspace(3) %p0, i32 %offset) {
; CHECK-LABEL: @nvvm_cp_async_bulk_intrinsics
define void @nvvm_cp_async_bulk_intrinsics(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, ptr addrspace(3) %src_shared, i32 %size) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %1, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
ret void
@@ -287,9 +287,9 @@ define void @nvvm_cp_async_bulk_intrinsics(ptr addrspace(3) %dst, ptr addrspace(
; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_im2col
define void @nvvm_cp_async_bulk_tensor_g2s_im2col(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %6, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %3, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 false, i1 false)
call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 0, i1 0)
call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 0, i1 0)
call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 0, i1 0)
@@ -298,11 +298,11 @@ define void @nvvm_cp_async_bulk_tensor_g2s_im2col(ptr addrspace(3) %d, ptr addrs
; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_tile
define void @nvvm_cp_async_bulk_tensor_g2s_tile(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %6, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %8, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %10, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %3, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %5, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 false, i1 false)
call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 0, i1 0)
call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 0, i1 0)
call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 0, i1 0)
diff --git a/llvm/test/CodeGen/NVPTX/addrspacecast-ptx64.ll b/llvm/test/CodeGen/NVPTX/addrspacecast-ptx64.ll
new file mode 100644
index 0000000000000..84ee45cb6ec06
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/addrspacecast-ptx64.ll
@@ -0,0 +1,48 @@
+; RUN: llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_80 | FileCheck %s -check-prefixes=ALL,NOPTRCONV,CLS64
+; RUN: llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_80 --nvptx-short-ptr | FileCheck %s -check-prefixes=ALL,PTRCONV,CLS64
+; RUN: %if ptxas-12.8 %{ llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_80 | %ptxas-verify %}
+; RUN: %if ptxas-12.8 %{ llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_80 --nvptx-short-ptr | %ptxas-verify %}
+
+; ALL-LABEL: conv_shared_cluster_to_generic
+define i32 @conv_shared_cluster_to_generic(ptr addrspace(7) %ptr) {
+; CLS32: cvta.shared::cluster.u32
+; PTRCONV: cvt.u64.u32
+; NOPTRCONV-NOT: cvt.u64.u32
+; CLS64: cvta.shared::cluster.u64
+; ALL: ld.u32
+ %genptr = addrspacecast ptr addrspace(7) %ptr to ptr
+ %val = load i32, ptr %genptr
+ ret i32 %val
+}
+
+; ALL-LABEL: conv_generic_to_shared_cluster
+define i32 @conv_generic_to_shared_cluster(ptr %ptr) {
+; CLS32: cvta.to.shared::cluster.u32
+; CLS64: cvta.to.shared::cluster.u64
+; PTRCONV: cvt.u32.u64
+; NOPTRCONV-NOT: cvt.u32.u64
+; ALL: ld.shared::cluster.u32
+ %specptr = addrspacecast ptr %ptr to ptr addrspace(7)
+ %val = load i32, ptr addrspace(7) %specptr
+ ret i32 %val
+}
+
+; ALL-LABEL: conv_shared_to_shared_cluster
+define i32 @conv_shared_to_shared_cluster(ptr addrspace(3) %ptr) {
+; CLS64: cvta.shared.u64
+; CLS64: cvta.to.shared::cluster.u64
+; ALL: ld.shared::cluster.u32
+ %specptr = addrspacecast ptr addrspace(3) %ptr to ptr addrspace(7)
+ %val = load i32, ptr addrspace(7) %specptr
+ ret i32 %val
+}
+
+; ALL-LABEL: conv_shared_cluster_to_shared
+define i32 @conv_shared_cluster_to_shared(ptr addrspace(7) %ptr) {
+; CLS64: cvta.shared::cluster.u64
+; CLS64: cvta.to.shared.u64
+; ALL: ld.shared.u32
+ %specptr = addrspacecast ptr addrspace(7) %ptr to ptr addrspace(3)
+ %val = load i32, ptr addrspace(3) %specptr
+ ret i32 %val
+}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
index aaad2b5dc880a..627065ef8fcfc 100644
--- a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
+++ b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
@@ -1,258 +1,329 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -o - -mcpu=sm_90 -march=nvptx64 -mattr=+ptx80 | FileCheck %s
-; RUN: %if ptxas-12.0 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80| %ptxas-verify -arch=sm_90 %}
+; RUN: llc < %s -o - -mcpu=sm_90 -mattr=+ptx78 | FileCheck %s
+; RUN: %if ptxas-12.0 %{ llc < %s -mcpu=sm_90 -mattr=+ptx78| %ptxas-verify -arch=sm_90 %}
target triple = "nvptx64-nvidia-cuda"
- at llvm.used = appending global [1 x ptr] [ptr @test_distributed_shared_cluster], section "llvm.metadata"
+ at llvm.used = appending global [5 x ptr] [
+ ptr @test_distributed_shared_cluster_common,
+ ptr @test_distributed_shared_cluster_float_atomic,
+ ptr @test_distributed_shared_cluster_int_atomic,
+ ptr @test_distributed_shared_cluster_bitwise_atomic,
+ ptr @test_distributed_shared_cluster_cmpxchg
+], section "llvm.metadata"
declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)
declare i1 @llvm.nvvm.isspacep.shared.cluster(ptr)
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
declare ptr @llvm.nvvm.mapa(ptr, i32)
-define i32 @test_distributed_shared_cluster(ptr %ptr, ptr addrspace(3) %smem_ptr) local_unnamed_addr {
-; CHECK-LABEL: test_distributed_shared_cluster(
+; Common setup for distributed shared memory cluster addressing
+define i32 @test_distributed_shared_cluster_common(ptr %ptr, ptr addrspace(3) %smem_ptr) local_unnamed_addr {
+; CHECK-LABEL: test_distributed_shared_cluster_common(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<3>;
+; CHECK-NEXT: .reg .b32 %r<8>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.u64 %rd1, [test_distributed_shared_cluster_common_param_0];
+; CHECK-NEXT: ld.param.u64 %rd2, [test_distributed_shared_cluster_common_param_1];
+; CHECK-NEXT: mov.u32 %r1, %ctaid.x;
+; CHECK-NEXT: xor.b32 %r2, %r1, 1;
+; CHECK-NEXT: isspacep.shared::cluster %p1, %rd1;
+; CHECK-NEXT: mapa.u64 %rd3, %rd1, %r2;
+; CHECK-NEXT: isspacep.shared::cluster %p2, %rd3;
+; CHECK-NEXT: mapa.shared::cluster.u64 %rd4, %rd2, %r2;
+; CHECK-NEXT: ld.shared::cluster.u32 %r3, [%rd4];
+; CHECK-NEXT: add.s32 %r4, %r3, 42;
+; CHECK-NEXT: st.shared::cluster.u32 [%rd4], %r4;
+; CHECK-NEXT: selp.b32 %r5, 1, 0, %p1;
+; CHECK-NEXT: selp.b32 %r6, 1, 0, %p2;
+; CHECK-NEXT: add.s32 %r7, %r5, %r6;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r7;
+; CHECK-NEXT: ret;
+entry:
+ %0 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
+ %1 = xor i32 %0, 1
+ %2 = tail call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %ptr)
+ %3 = tail call ptr @llvm.nvvm.mapa(ptr %ptr, i32 %1)
+ %4 = tail call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %3)
+ %dsmem_ptr = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %smem_ptr, i32 %1)
+
+ ; Add load and store to the distributed shared memory cluster
+ %loaded_val = load i32, ptr addrspace(7) %dsmem_ptr
+ %updated_val = add i32 %loaded_val, 42
+ store i32 %updated_val, ptr addrspace(7) %dsmem_ptr
+
+ ; Return value preserves the isspacep test results plus the value operation
+ %5 = zext i1 %2 to i32
+ %6 = zext i1 %4 to i32
+ %ret = add i32 %5, %6
+ ret i32 %ret
+}
+
+; Floating point atomic operations tests
+define void @test_distributed_shared_cluster_float_atomic(ptr addrspace(7) %dsmem_ptr) local_unnamed_addr {
+; CHECK-LABEL: test_distributed_shared_cluster_float_atomic(
; CHECK: {
-; CHECK-NEXT: .reg .pred %p<13>;
; CHECK-NEXT: .reg .b16 %rs<5>;
-; CHECK-NEXT: .reg .b32 %r<69>;
; CHECK-NEXT: .reg .f32 %f<2>;
-; CHECK-NEXT: .reg .b64 %rd<24>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-NEXT: .reg .f64 %fd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0: // %entry
-; CHECK-NEXT: ld.param.u64 %rd2, [test_distributed_shared_cluster_param_0];
-; CHECK-NEXT: ld.param.u64 %rd3, [test_distributed_shared_cluster_param_1];
-; CHECK-NEXT: mov.u32 %r24, %ctaid.x;
-; CHECK-NEXT: xor.b32 %r25, %r24, 1;
-; CHECK-NEXT: isspacep.shared::cluster %p1, %rd2;
-; CHECK-NEXT: mapa.u64 %rd4, %rd2, %r25;
-; CHECK-NEXT: isspacep.shared::cluster %p2, %rd4;
-; CHECK-NEXT: mapa.shared::cluster.u64 %rd5, %rd3, %r25;
+; CHECK-NEXT: ld.param.u64 %rd1, [test_distributed_shared_cluster_float_atomic_param_0];
; CHECK-NEXT: mov.b16 %rs1, 0x3C00;
-; CHECK-NEXT: atom.shared::cluster.add.noftz.f16 %rs2, [%rd5], %rs1;
+; CHECK-NEXT: atom.shared::cluster.add.noftz.f16 %rs2, [%rd1], %rs1;
; CHECK-NEXT: mov.b16 %rs3, 0x3F80;
-; CHECK-NEXT: atom.shared::cluster.add.noftz.bf16 %rs4, [%rd5], %rs3;
-; CHECK-NEXT: atom.shared::cluster.add.f32 %f1, [%rd5], 0f3F800000;
-; CHECK-NEXT: atom.shared::cluster.add.f64 %fd1, [%rd5], 0d3FF0000000000000;
-; CHECK-NEXT: atom.shared::cluster.add.u32 %r26, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.add.u64 %rd6, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.exch.b32 %r27, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.exch.b64 %rd7, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.min.s32 %r28, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.min.s64 %rd8, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.min.u32 %r29, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.min.u64 %rd9, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.max.s32 %r30, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.max.s64 %rd10, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.max.u32 %r31, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.max.u64 %rd11, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.inc.u32 %r32, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.dec.u32 %r33, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.and.b32 %r34, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.and.b64 %rd12, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.or.b32 %r35, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.or.b64 %rd13, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.xor.b32 %r36, [%rd5], 1;
-; CHECK-NEXT: atom.shared::cluster.xor.b64 %rd14, [%rd5], 1;
-; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r37, [%rd5], 1, 0;
-; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r38, [%rd5], 1, 0;
-; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r39, [%rd5], 1, 0;
-; CHECK-NEXT: atom.release.shared::cluster.cas.b32 %r40, [%rd5], 1, 0;
-; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b32 %r41, [%rd5], 1, 0;
-; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b32 %r42, [%rd5], 1, 0;
+; CHECK-NEXT: atom.shared::cluster.add.noftz.bf16 %rs4, [%rd1], %rs3;
+; CHECK-NEXT: atom.shared::cluster.add.f32 %f1, [%rd1], 0f3F800000;
+; CHECK-NEXT: atom.shared::cluster.add.f64 %fd1, [%rd1], 0d3FF0000000000000;
+; CHECK-NEXT: ret;
+entry:
+ ; Floating point atomic operations
+ %0 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, half 1.000000e+00 seq_cst
+ %1 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, bfloat 1.000000e+00 seq_cst
+ %2 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, float 1.000000e+00 seq_cst
+ %3 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, double 1.000000e+00 seq_cst
+
+ ret void
+}
+
+; Integer atomic operations tests
+define void @test_distributed_shared_cluster_int_atomic(ptr addrspace(7) %dsmem_ptr) local_unnamed_addr {
+; CHECK-LABEL: test_distributed_shared_cluster_int_atomic(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-NEXT: .reg .b64 %rd<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.u64 %rd1, [test_distributed_shared_cluster_int_atomic_param_0];
+; CHECK-NEXT: atom.shared::cluster.add.u32 %r1, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.add.u64 %rd2, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.exch.b32 %r2, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.exch.b64 %rd3, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.min.s32 %r3, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.min.s64 %rd4, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.min.u32 %r4, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.min.u64 %rd5, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.max.s32 %r5, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.max.s64 %rd6, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.max.u32 %r6, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.max.u64 %rd7, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.inc.u32 %r7, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.dec.u32 %r8, [%rd1], 1;
+; CHECK-NEXT: ret;
+entry:
+ ; Integer add operations
+ %0 = atomicrmw add ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %1 = atomicrmw add ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Exchange operations
+ %2 = atomicrmw xchg ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %3 = atomicrmw xchg ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Min operations (signed and unsigned)
+ %4 = atomicrmw min ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %5 = atomicrmw min ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %6 = atomicrmw umin ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %7 = atomicrmw umin ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Max operations (signed and unsigned)
+ %8 = atomicrmw max ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %9 = atomicrmw max ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %10 = atomicrmw umax ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %11 = atomicrmw umax ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Inc/Dec operations (32-bit only)
+ %12 = atomicrmw uinc_wrap ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %13 = atomicrmw udec_wrap ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+
+ ret void
+}
+
+; Bitwise atomic operations tests
+define void @test_distributed_shared_cluster_bitwise_atomic(ptr addrspace(7) %dsmem_ptr) local_unnamed_addr {
+; CHECK-LABEL: test_distributed_shared_cluster_bitwise_atomic(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.u64 %rd1, [test_distributed_shared_cluster_bitwise_atomic_param_0];
+; CHECK-NEXT: atom.shared::cluster.and.b32 %r1, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.and.b64 %rd2, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.or.b32 %r2, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.or.b64 %rd3, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.xor.b32 %r3, [%rd1], 1;
+; CHECK-NEXT: atom.shared::cluster.xor.b64 %rd4, [%rd1], 1;
+; CHECK-NEXT: ret;
+entry:
+ ; Bitwise operations
+ %0 = atomicrmw and ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %1 = atomicrmw and ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %2 = atomicrmw or ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %3 = atomicrmw or ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %4 = atomicrmw xor ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %5 = atomicrmw xor ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ret void
+}
+
+; Compare-exchange operations tests
+define void @test_distributed_shared_cluster_cmpxchg(ptr addrspace(7) %dsmem_ptr) local_unnamed_addr {
+; CHECK-LABEL: test_distributed_shared_cluster_cmpxchg(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<11>;
+; CHECK-NEXT: .reg .b32 %r<53>;
+; CHECK-NEXT: .reg .b64 %rd<12>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.u64 %rd2, [test_distributed_shared_cluster_cmpxchg_param_0];
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r24, [%rd2], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r25, [%rd2], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r26, [%rd2], 1, 0;
+; CHECK-NEXT: atom.release.shared::cluster.cas.b32 %r27, [%rd2], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b32 %r28, [%rd2], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b32 %r29, [%rd2], 1, 0;
; CHECK-NEXT: fence.sc.sys;
-; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r43, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r30, [%rd2], 1, 0;
; CHECK-NEXT: fence.sc.sys;
-; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r44, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r31, [%rd2], 1, 0;
; CHECK-NEXT: fence.sc.sys;
-; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r45, [%rd5], 1, 0;
-; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b64 %rd15, [%rd5], 1, 0;
-; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd16, [%rd5], 1, 0;
-; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd17, [%rd5], 1, 0;
-; CHECK-NEXT: atom.release.shared::cluster.cas.b64 %rd18, [%rd5], 1, 0;
-; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b64 %rd19, [%rd5], 1, 0;
-; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b64 %rd20, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r32, [%rd2], 1, 0;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b64 %rd3, [%rd2], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd4, [%rd2], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd5, [%rd2], 1, 0;
+; CHECK-NEXT: atom.release.shared::cluster.cas.b64 %rd6, [%rd2], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b64 %rd7, [%rd2], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b64 %rd8, [%rd2], 1, 0;
; CHECK-NEXT: fence.sc.sys;
-; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd21, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd9, [%rd2], 1, 0;
; CHECK-NEXT: fence.sc.sys;
-; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd22, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd10, [%rd2], 1, 0;
; CHECK-NEXT: fence.sc.sys;
-; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd23, [%rd5], 1, 0;
-; CHECK-NEXT: and.b64 %rd1, %rd5, -4;
-; CHECK-NEXT: cvt.u32.u64 %r46, %rd5;
-; CHECK-NEXT: and.b32 %r47, %r46, 3;
-; CHECK-NEXT: shl.b32 %r1, %r47, 3;
-; CHECK-NEXT: mov.b32 %r48, 65535;
-; CHECK-NEXT: shl.b32 %r49, %r48, %r1;
-; CHECK-NEXT: not.b32 %r2, %r49;
-; CHECK-NEXT: mov.b32 %r50, 1;
-; CHECK-NEXT: shl.b32 %r3, %r50, %r1;
-; CHECK-NEXT: ld.shared::cluster.u32 %r51, [%rd1];
-; CHECK-NEXT: and.b32 %r64, %r51, %r2;
-; CHECK-NEXT: $L__BB0_1: // %partword.cmpxchg.loop33
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd11, [%rd2], 1, 0;
+; CHECK-NEXT: and.b64 %rd1, %rd2, -4;
+; CHECK-NEXT: cvt.u32.u64 %r33, %rd2;
+; CHECK-NEXT: and.b32 %r34, %r33, 3;
+; CHECK-NEXT: shl.b32 %r1, %r34, 3;
+; CHECK-NEXT: mov.b32 %r35, 65535;
+; CHECK-NEXT: shl.b32 %r36, %r35, %r1;
+; CHECK-NEXT: not.b32 %r2, %r36;
+; CHECK-NEXT: mov.b32 %r37, 1;
+; CHECK-NEXT: shl.b32 %r3, %r37, %r1;
+; CHECK-NEXT: ld.shared::cluster.u32 %r38, [%rd1];
+; CHECK-NEXT: and.b32 %r48, %r38, %r2;
+; CHECK-NEXT: $L__BB4_1: // %partword.cmpxchg.loop33
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: or.b32 %r52, %r64, %r3;
-; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r6, [%rd1], %r52, %r64;
-; CHECK-NEXT: setp.eq.s32 %p3, %r6, %r52;
-; CHECK-NEXT: @%p3 bra $L__BB0_3;
+; CHECK-NEXT: or.b32 %r39, %r48, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r6, [%rd1], %r39, %r48;
+; CHECK-NEXT: setp.eq.s32 %p1, %r6, %r39;
+; CHECK-NEXT: @%p1 bra $L__BB4_3;
; CHECK-NEXT: // %bb.2: // %partword.cmpxchg.failure32
-; CHECK-NEXT: // in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT: // in Loop: Header=BB4_1 Depth=1
; CHECK-NEXT: and.b32 %r7, %r6, %r2;
-; CHECK-NEXT: setp.ne.s32 %p4, %r64, %r7;
-; CHECK-NEXT: mov.b32 %r64, %r7;
-; CHECK-NEXT: @%p4 bra $L__BB0_1;
-; CHECK-NEXT: $L__BB0_3: // %partword.cmpxchg.end31
-; CHECK-NEXT: ld.shared::cluster.u32 %r53, [%rd1];
-; CHECK-NEXT: and.b32 %r65, %r53, %r2;
-; CHECK-NEXT: $L__BB0_4: // %partword.cmpxchg.loop23
+; CHECK-NEXT: setp.ne.s32 %p2, %r48, %r7;
+; CHECK-NEXT: mov.b32 %r48, %r7;
+; CHECK-NEXT: @%p2 bra $L__BB4_1;
+; CHECK-NEXT: $L__BB4_3: // %partword.cmpxchg.end31
+; CHECK-NEXT: ld.shared::cluster.u32 %r40, [%rd1];
+; CHECK-NEXT: and.b32 %r49, %r40, %r2;
+; CHECK-NEXT: $L__BB4_4: // %partword.cmpxchg.loop23
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: or.b32 %r54, %r65, %r3;
-; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r10, [%rd1], %r54, %r65;
-; CHECK-NEXT: setp.eq.s32 %p5, %r10, %r54;
-; CHECK-NEXT: @%p5 bra $L__BB0_6;
+; CHECK-NEXT: or.b32 %r41, %r49, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r10, [%rd1], %r41, %r49;
+; CHECK-NEXT: setp.eq.s32 %p3, %r10, %r41;
+; CHECK-NEXT: @%p3 bra $L__BB4_6;
; CHECK-NEXT: // %bb.5: // %partword.cmpxchg.failure22
-; CHECK-NEXT: // in Loop: Header=BB0_4 Depth=1
+; CHECK-NEXT: // in Loop: Header=BB4_4 Depth=1
; CHECK-NEXT: and.b32 %r11, %r10, %r2;
-; CHECK-NEXT: setp.ne.s32 %p6, %r65, %r11;
-; CHECK-NEXT: mov.b32 %r65, %r11;
-; CHECK-NEXT: @%p6 bra $L__BB0_4;
-; CHECK-NEXT: $L__BB0_6: // %partword.cmpxchg.end21
+; CHECK-NEXT: setp.ne.s32 %p4, %r49, %r11;
+; CHECK-NEXT: mov.b32 %r49, %r11;
+; CHECK-NEXT: @%p4 bra $L__BB4_4;
+; CHECK-NEXT: $L__BB4_6: // %partword.cmpxchg.end21
; CHECK-NEXT: fence.acq_rel.sys;
; CHECK-NEXT: fence.acq_rel.sys;
-; CHECK-NEXT: ld.shared::cluster.u32 %r55, [%rd1];
-; CHECK-NEXT: and.b32 %r66, %r55, %r2;
-; CHECK-NEXT: $L__BB0_7: // %partword.cmpxchg.loop13
+; CHECK-NEXT: ld.shared::cluster.u32 %r42, [%rd1];
+; CHECK-NEXT: and.b32 %r50, %r42, %r2;
+; CHECK-NEXT: $L__BB4_7: // %partword.cmpxchg.loop13
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: or.b32 %r56, %r66, %r3;
-; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r14, [%rd1], %r56, %r66;
-; CHECK-NEXT: setp.eq.s32 %p7, %r14, %r56;
-; CHECK-NEXT: @%p7 bra $L__BB0_9;
+; CHECK-NEXT: or.b32 %r43, %r50, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r14, [%rd1], %r43, %r50;
+; CHECK-NEXT: setp.eq.s32 %p5, %r14, %r43;
+; CHECK-NEXT: @%p5 bra $L__BB4_9;
; CHECK-NEXT: // %bb.8: // %partword.cmpxchg.failure12
-; CHECK-NEXT: // in Loop: Header=BB0_7 Depth=1
+; CHECK-NEXT: // in Loop: Header=BB4_7 Depth=1
; CHECK-NEXT: and.b32 %r15, %r14, %r2;
-; CHECK-NEXT: setp.ne.s32 %p8, %r66, %r15;
-; CHECK-NEXT: mov.b32 %r66, %r15;
-; CHECK-NEXT: @%p8 bra $L__BB0_7;
-; CHECK-NEXT: $L__BB0_9: // %partword.cmpxchg.end11
+; CHECK-NEXT: setp.ne.s32 %p6, %r50, %r15;
+; CHECK-NEXT: mov.b32 %r50, %r15;
+; CHECK-NEXT: @%p6 bra $L__BB4_7;
+; CHECK-NEXT: $L__BB4_9: // %partword.cmpxchg.end11
; CHECK-NEXT: fence.acq_rel.sys;
-; CHECK-NEXT: ld.shared::cluster.u32 %r57, [%rd1];
-; CHECK-NEXT: and.b32 %r67, %r57, %r2;
-; CHECK-NEXT: $L__BB0_10: // %partword.cmpxchg.loop3
+; CHECK-NEXT: ld.shared::cluster.u32 %r44, [%rd1];
+; CHECK-NEXT: and.b32 %r51, %r44, %r2;
+; CHECK-NEXT: $L__BB4_10: // %partword.cmpxchg.loop3
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: or.b32 %r58, %r67, %r3;
-; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r18, [%rd1], %r58, %r67;
-; CHECK-NEXT: setp.eq.s32 %p9, %r18, %r58;
-; CHECK-NEXT: @%p9 bra $L__BB0_12;
+; CHECK-NEXT: or.b32 %r45, %r51, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r18, [%rd1], %r45, %r51;
+; CHECK-NEXT: setp.eq.s32 %p7, %r18, %r45;
+; CHECK-NEXT: @%p7 bra $L__BB4_12;
; CHECK-NEXT: // %bb.11: // %partword.cmpxchg.failure2
-; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=1
+; CHECK-NEXT: // in Loop: Header=BB4_10 Depth=1
; CHECK-NEXT: and.b32 %r19, %r18, %r2;
-; CHECK-NEXT: setp.ne.s32 %p10, %r67, %r19;
-; CHECK-NEXT: mov.b32 %r67, %r19;
-; CHECK-NEXT: @%p10 bra $L__BB0_10;
-; CHECK-NEXT: $L__BB0_12: // %partword.cmpxchg.end1
+; CHECK-NEXT: setp.ne.s32 %p8, %r51, %r19;
+; CHECK-NEXT: mov.b32 %r51, %r19;
+; CHECK-NEXT: @%p8 bra $L__BB4_10;
+; CHECK-NEXT: $L__BB4_12: // %partword.cmpxchg.end1
; CHECK-NEXT: fence.acq_rel.sys;
; CHECK-NEXT: fence.sc.sys;
-; CHECK-NEXT: ld.shared::cluster.u32 %r59, [%rd1];
-; CHECK-NEXT: and.b32 %r68, %r59, %r2;
-; CHECK-NEXT: $L__BB0_13: // %partword.cmpxchg.loop
+; CHECK-NEXT: ld.shared::cluster.u32 %r46, [%rd1];
+; CHECK-NEXT: and.b32 %r52, %r46, %r2;
+; CHECK-NEXT: $L__BB4_13: // %partword.cmpxchg.loop
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: or.b32 %r60, %r68, %r3;
-; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r22, [%rd1], %r60, %r68;
-; CHECK-NEXT: setp.eq.s32 %p11, %r22, %r60;
-; CHECK-NEXT: @%p11 bra $L__BB0_15;
+; CHECK-NEXT: or.b32 %r47, %r52, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r22, [%rd1], %r47, %r52;
+; CHECK-NEXT: setp.eq.s32 %p9, %r22, %r47;
+; CHECK-NEXT: @%p9 bra $L__BB4_15;
; CHECK-NEXT: // %bb.14: // %partword.cmpxchg.failure
-; CHECK-NEXT: // in Loop: Header=BB0_13 Depth=1
+; CHECK-NEXT: // in Loop: Header=BB4_13 Depth=1
; CHECK-NEXT: and.b32 %r23, %r22, %r2;
-; CHECK-NEXT: setp.ne.s32 %p12, %r68, %r23;
-; CHECK-NEXT: mov.b32 %r68, %r23;
-; CHECK-NEXT: @%p12 bra $L__BB0_13;
-; CHECK-NEXT: $L__BB0_15: // %partword.cmpxchg.end
+; CHECK-NEXT: setp.ne.s32 %p10, %r52, %r23;
+; CHECK-NEXT: mov.b32 %r52, %r23;
+; CHECK-NEXT: @%p10 bra $L__BB4_13;
+; CHECK-NEXT: $L__BB4_15: // %partword.cmpxchg.end
; CHECK-NEXT: fence.acq_rel.sys;
-; CHECK-NEXT: selp.b32 %r61, 1, 0, %p1;
-; CHECK-NEXT: selp.b32 %r62, 1, 0, %p2;
-; CHECK-NEXT: add.s32 %r63, %r61, %r62;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r63;
; CHECK-NEXT: ret;
entry:
- %0 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
- %1 = xor i32 %0, 1
- %2 = tail call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %ptr)
- %3 = tail call ptr @llvm.nvvm.mapa(ptr %ptr, i32 %1)
- %4 = tail call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %3)
- %dsmem_ptr = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %smem_ptr, i32 %1)
-
- ; Floating point atomic operations
- %5 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, half 1.000000e+00 seq_cst
- %6 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, bfloat 1.000000e+00 seq_cst
- %7 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, float 1.000000e+00 seq_cst
- %8 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, double 1.000000e+00 seq_cst
-
- ; Integer add operations
- %9 = atomicrmw add ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
- %10 = atomicrmw add ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
-
- ; Exchange operations
- %11 = atomicrmw xchg ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
- %12 = atomicrmw xchg ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
-
- ; Min operations (signed and unsigned)
- %13 = atomicrmw min ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
- %14 = atomicrmw min ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
- %15 = atomicrmw umin ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
- %16 = atomicrmw umin ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
-
- ; Max operations (signed and unsigned)
- %17 = atomicrmw max ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
- %18 = atomicrmw max ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
- %19 = atomicrmw umax ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
- %20 = atomicrmw umax ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
-
- ; Inc/Dec operations (32-bit only)
- %21 = atomicrmw uinc_wrap ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
- %22 = atomicrmw udec_wrap ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
-
- ; Bitwise operations
- %23 = atomicrmw and ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
- %24 = atomicrmw and ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
- %25 = atomicrmw or ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
- %26 = atomicrmw or ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
- %27 = atomicrmw xor ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
- %28 = atomicrmw xor ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
-
; Compare-exchange operation - all memory ordering combinations for 32-bit
- %29 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 monotonic monotonic
- %30 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acquire monotonic
- %31 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acquire acquire
- %32 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 release monotonic
- %33 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acq_rel monotonic
- %34 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acq_rel acquire
- %35 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst monotonic
- %36 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst acquire
- %37 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst seq_cst
+ %0 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 monotonic monotonic
+ %1 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acquire monotonic
+ %2 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acquire acquire
+ %3 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 release monotonic
+ %4 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acq_rel monotonic
+ %5 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acq_rel acquire
+ %6 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst monotonic
+ %7 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst acquire
+ %8 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst seq_cst
; Compare-exchange operation - all memory ordering combinations for 64-bit
- %38 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 monotonic monotonic
- %39 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acquire monotonic
- %40 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acquire acquire
- %41 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 release monotonic
- %42 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acq_rel monotonic
- %43 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acq_rel acquire
- %44 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst monotonic
- %45 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst acquire
- %46 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst seq_cst
+ %9 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 monotonic monotonic
+ %10 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acquire monotonic
+ %11 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acquire acquire
+ %12 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 release monotonic
+ %13 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acq_rel monotonic
+ %14 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acq_rel acquire
+ %15 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst monotonic
+ %16 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst acquire
+ %17 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst seq_cst
; Compare-exchange operation - 16-bit
- %47 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 monotonic monotonic
- %48 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 acquire acquire
- %49 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 release monotonic
- %50 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 acq_rel acquire
- %51 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 seq_cst seq_cst
+ %18 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 monotonic monotonic
+ %19 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 acquire acquire
+ %20 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 release monotonic
+ %21 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 acq_rel acquire
+ %22 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 seq_cst seq_cst
- ; Return value preserves the isspacep test results
- %52 = zext i1 %2 to i32
- %53 = zext i1 %4 to i32
- %ret = add i32 %52, %53
- ret i32 %ret
+ ret void
}
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
index cd6f3af2e9241..f1eae15d6bf18 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
@@ -45,7 +45,7 @@ enum NVVMMemorySpace {
/// variants from sm100 onwards.
kTensorMemorySpace = 6,
/// Distributed shared memory space identifier.
- /// Distributed shared memory is available only in sm80+.
+ /// Distributed shared memory is available only in sm90+.
kSharedClusterMemorySpace = 7,
};
>From f7a17c42139b38aa97d7f9cd97d2e903004c394c Mon Sep 17 00:00:00 2001
From: Modi Mo <mmo at nvidia.com>
Date: Thu, 17 Apr 2025 22:53:25 -0700
Subject: [PATCH 4/9] remove undefs in tests
---
.../Assembler/auto_upgrade_nvvm_intrinsics.ll | 32 +++++++++----------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
index e80ef7e9e1e72..9b1dd7088f1dd 100644
--- a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
+++ b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
@@ -287,27 +287,27 @@ define void @nvvm_cp_async_bulk_intrinsics(ptr addrspace(3) %dst, ptr addrspace(
; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_im2col
define void @nvvm_cp_async_bulk_tensor_g2s_im2col(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %3, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 0, i1 0)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %3, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 0, i64 0, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 0, i64 0, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 0, i64 0, i1 0, i1 0)
ret void
}
; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_tile
define void @nvvm_cp_async_bulk_tensor_g2s_tile(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %3, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %5, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 0, i1 0)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %3, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %5, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 0, i64 0, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 0, i64 0, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 0, i64 0, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 0, i64 0, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 0, i64 0, i1 0, i1 0)
ret void
}
>From 56b62094e94db9213a2c80ca39270b56d8dc6e7c Mon Sep 17 00:00:00 2001
From: Modi Mo <mmo at nvidia.com>
Date: Fri, 18 Apr 2025 17:13:21 -0700
Subject: [PATCH 5/9] review feedback
---
llvm/docs/NVPTXUsage.rst | 27 ++++
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 32 ++---
llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 44 +++---
.../Target/NVPTX/NVPTXTargetTransformInfo.cpp | 5 +-
.../test/CodeGen/NVPTX/addrspacecast-ptx64.ll | 127 +++++++++++++++---
.../NVPTX/distributed-shared-cluster.ll | 8 --
.../InferAddressSpaces/NVPTX/isspacep.ll | 13 +-
7 files changed, 171 insertions(+), 85 deletions(-)
diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst
index 7bd68c3fe4f7a..c1426823d87af 100644
--- a/llvm/docs/NVPTXUsage.rst
+++ b/llvm/docs/NVPTXUsage.rst
@@ -108,6 +108,7 @@ The NVPTX back-end uses the following address space mapping:
3 Shared
4 Constant
5 Local
+ 7 Shared Cluster
============= ======================
Every global variable and pointer type is assigned to one of these address
@@ -306,6 +307,32 @@ If the given pointer in the generic address space refers to memory which falls
within the state space of the intrinsic (and therefore could be safely address
space casted to this space), 1 is returned, otherwise 0 is returned.
+'``llvm.nvvm.mapa.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+.. code-block:: llvm
+
+ declare ptr @llvm.nvvm.mapa(ptr %p, i32 %rank)
+ declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p, i32 %rank)
+
+Overview:
+"""""""""
+
+The '``llvm.nvvm.mapa.*``' intrinsics map a shared memory pointer ``p`` of the current CTA to the corresponding address in the shared memory of the CTA whose rank in the cluster is ``%rank``.
+The ``llvm.nvvm.mapa`` form expects a generic pointer to shared memory and returns a generic pointer to shared cluster memory.
+The ``llvm.nvvm.mapa.shared.cluster`` form expects a pointer to shared memory and returns a pointer to shared cluster memory.
+They correspond directly to the ``mapa`` and ``mapa.shared.cluster`` PTX instructions.
+
+Semantics:
+""""""""""
+
+The returned pointer addresses the same location as ``p``, but within the shared
+memory of the CTA whose rank in the cluster is ``%rank``, and can be used to access
+that CTA's shared memory through the ``shared::cluster`` state space.
+
Arithmetic Intrinsics
---------------------
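To make the documented behaviour concrete, a small usage sketch of the shared.cluster form (illustrative only; the function name is invented, and the pattern mirrors the distributed-shared-cluster.ll test in this patch series):

  declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)

  define void @bump_remote_counter(ptr addrspace(3) %smem, i32 %rank) {
    ; map our CTA's shared-memory pointer to the CTA with rank %rank in the cluster
    %remote = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %smem, i32 %rank)
    %v = load i32, ptr addrspace(7) %remote
    %v.inc = add i32 %v, 1
    store i32 %v.inc, ptr addrspace(7) %remote
    ret void
  }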
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index ef768fabdb637..58bb093e6ec90 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -3046,36 +3046,20 @@ SDValue NVPTXTargetLowering::LowerADDRSPACECAST(SDValue Op,
DestAS != llvm::ADDRESS_SPACE_GENERIC) {
// Shared and SharedCluster can be converted to each other through generic
// space
- if (SrcAS == llvm::ADDRESS_SPACE_SHARED &&
- DestAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER) {
+ if ((SrcAS == llvm::ADDRESS_SPACE_SHARED &&
+ DestAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER) ||
+ (SrcAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER &&
+ DestAS == llvm::ADDRESS_SPACE_SHARED)) {
const MVT GenerictVT =
getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_GENERIC);
- const MVT SharedClusterVT =
- getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_SHARED_CLUSTER);
- SDValue GenericConversion =
- DAG.getAddrSpaceCast(SDLoc(), GenerictVT, Op.getOperand(0),
- ADDRESS_SPACE_SHARED, ADDRESS_SPACE_GENERIC);
+ const MVT OutputVT = getPointerTy(DAG.getDataLayout(), DestAS);
+ SDValue GenericConversion = DAG.getAddrSpaceCast(
+ SDLoc(), GenerictVT, Op.getOperand(0), SrcAS, ADDRESS_SPACE_GENERIC);
SDValue SharedClusterConversion = DAG.getAddrSpaceCast(
- SDLoc(), SharedClusterVT, GenericConversion, ADDRESS_SPACE_GENERIC,
- ADDRESS_SPACE_SHARED_CLUSTER);
+ SDLoc(), OutputVT, GenericConversion, ADDRESS_SPACE_GENERIC, DestAS);
return SharedClusterConversion;
}
- if (SrcAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER &&
- DestAS == llvm::ADDRESS_SPACE_SHARED) {
- const MVT GenerictVT =
- getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_GENERIC);
- const MVT SharedVT =
- getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_SHARED);
- SDValue GenericConversion = DAG.getAddrSpaceCast(
- SDLoc(), GenerictVT, Op.getOperand(0), ADDRESS_SPACE_SHARED_CLUSTER,
- ADDRESS_SPACE_GENERIC);
- SDValue SharedConversion =
- DAG.getAddrSpaceCast(SDLoc(), SharedVT, GenericConversion,
- ADDRESS_SPACE_GENERIC, ADDRESS_SPACE_SHARED);
- return SharedConversion;
- }
-
return DAG.getUNDEF(Op.getValueType());
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 8c542af4dc3e1..0b2c04ee3b729 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -2387,28 +2387,28 @@ def INT_PTX_LDG_G_v4i32_ELE : VLDG_G_ELE_V4<"u32", Int32Regs>;
def INT_PTX_LDG_G_v4f32_ELE : VLDG_G_ELE_V4<"f32", Float32Regs>;
-multiclass NG_TO_G<string Str, list<Predicate> Preds = []> {
- def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
- "cvta." # Str # ".u32 \t$result, $src;", []>, Requires<Preds>;
- def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
- "cvta." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
-}
-
-multiclass NG_TO_G_64<string Str, list<Predicate> Preds = []> {
- def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
- "cvta." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
-}
-
-multiclass G_TO_NG<string Str, list<Predicate> Preds = []> {
- def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
- "cvta.to." # Str # ".u32 \t$result, $src;", []>, Requires<Preds>;
- def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
- "cvta.to." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
+multiclass NG_TO_G<string Str, bit Supports32 = 1, list<Predicate> Preds = []> {
+ foreach bitwidth = !if(Supports32, ["32", "64"], ["64"]) in {
+ if !eq(bitwidth, "32") then {
+ def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
+ "cvta." # Str # ".u32 \t$result, $src;", []>, Requires<Preds>;
+ } else if !eq(bitwidth, "64") then {
+ def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ "cvta." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
+ }
+ }
}
-multiclass G_TO_NG_64<string Str, list<Predicate> Preds = []> {
- def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
- "cvta.to." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
+multiclass G_TO_NG<string Str, bit Supports32 = 1, list<Predicate> Preds = []> {
+ foreach bitwidth = !if(Supports32, ["32", "64"], ["64"]) in {
+ if !eq(bitwidth, "32") then {
+ def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
+ "cvta.to." # Str # ".u32 \t$result, $src;", []>, Requires<Preds>;
+ } else if !eq(bitwidth, "64") then {
+ def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ "cvta.to." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
+ }
+ }
}
foreach space = ["local", "shared", "global", "const", "param"] in {
@@ -2417,8 +2417,8 @@ foreach space = ["local", "shared", "global", "const", "param"] in {
}
foreach space = ["shared::cluster"] in {
- defm cvta_#space : NG_TO_G<space>;
- defm cvta_to_#space : G_TO_NG<space>;
+ defm cvta_#space : NG_TO_G<space, false, [hasclusters]>;
+ defm cvta_to_#space : G_TO_NG<space, false, [hasclusters]>;
}
def : Pat<(int_nvvm_ptr_param_to_gen i32:$src),
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index b5f7d90cd29d9..e662728ae9295 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -426,10 +426,7 @@ static std::optional<bool> evaluateIsSpace(Intrinsic::ID IID, unsigned AS) {
case Intrinsic::nvvm_isspacep_shared:
return AS == NVPTXAS::ADDRESS_SPACE_SHARED;
case Intrinsic::nvvm_isspacep_shared_cluster:
- // We can't tell shared from shared_cluster at compile time from AS alone,
- // but it can't be either is AS is not shared.
- return AS == NVPTXAS::ADDRESS_SPACE_SHARED ? std::nullopt
- : std::optional{false};
+ return AS == NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER;
case Intrinsic::nvvm_isspacep_const:
return AS == NVPTXAS::ADDRESS_SPACE_CONST;
default:
diff --git a/llvm/test/CodeGen/NVPTX/addrspacecast-ptx64.ll b/llvm/test/CodeGen/NVPTX/addrspacecast-ptx64.ll
index 84ee45cb6ec06..afd0a7fded64e 100644
--- a/llvm/test/CodeGen/NVPTX/addrspacecast-ptx64.ll
+++ b/llvm/test/CodeGen/NVPTX/addrspacecast-ptx64.ll
@@ -1,15 +1,36 @@
-; RUN: llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_80 | FileCheck %s -check-prefixes=ALL,NOPTRCONV,CLS64
-; RUN: llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_80 --nvptx-short-ptr | FileCheck %s -check-prefixes=ALL,PTRCONV,CLS64
-; RUN: %if ptxas-12.8 %{ llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_80 | %ptxas-verify %}
-; RUN: %if ptxas-12.8 %{ llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_80 --nvptx-short-ptr | %ptxas-verify %}
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx78 | FileCheck %s -check-prefixes=NOPTRCONV
+; RUN: llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx78 --nvptx-short-ptr | FileCheck %s -check-prefixes=PTRCONV
+; RUN: %if ptxas-12.8 %{ llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx78 | %ptxas-verify %}
+; RUN: %if ptxas-12.8 %{ llc -O0 < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx78 --nvptx-short-ptr | %ptxas-verify %}
; ALL-LABEL: conv_shared_cluster_to_generic
define i32 @conv_shared_cluster_to_generic(ptr addrspace(7) %ptr) {
; CLS32: cvta.shared::cluster.u32
-; PTRCONV: cvt.u64.u32
-; NOPTRCONV-NOT: cvt.u64.u32
-; CLS64: cvta.shared::cluster.u64
-; ALL: ld.u32
+; NOPTRCONV-LABEL: conv_shared_cluster_to_generic(
+; NOPTRCONV: {
+; NOPTRCONV-NEXT: .reg .b32 %r<2>;
+; NOPTRCONV-NEXT: .reg .b64 %rd<3>;
+; NOPTRCONV-EMPTY:
+; NOPTRCONV-NEXT: // %bb.0:
+; NOPTRCONV-NEXT: ld.param.u64 %rd1, [conv_shared_cluster_to_generic_param_0];
+; NOPTRCONV-NEXT: cvta.shared::cluster.u64 %rd2, %rd1;
+; NOPTRCONV-NEXT: ld.u32 %r1, [%rd2];
+; NOPTRCONV-NEXT: st.param.b32 [func_retval0], %r1;
+; NOPTRCONV-NEXT: ret;
+;
+; PTRCONV-LABEL: conv_shared_cluster_to_generic(
+; PTRCONV: {
+; PTRCONV-NEXT: .reg .b32 %r<3>;
+; PTRCONV-NEXT: .reg .b64 %rd<3>;
+; PTRCONV-EMPTY:
+; PTRCONV-NEXT: // %bb.0:
+; PTRCONV-NEXT: ld.param.u32 %r1, [conv_shared_cluster_to_generic_param_0];
+; PTRCONV-NEXT: cvt.u64.u32 %rd1, %r1;
+; PTRCONV-NEXT: cvta.shared::cluster.u64 %rd2, %rd1;
+; PTRCONV-NEXT: ld.u32 %r2, [%rd2];
+; PTRCONV-NEXT: st.param.b32 [func_retval0], %r2;
+; PTRCONV-NEXT: ret;
%genptr = addrspacecast ptr addrspace(7) %ptr to ptr
%val = load i32, ptr %genptr
ret i32 %val
@@ -18,10 +39,30 @@ define i32 @conv_shared_cluster_to_generic(ptr addrspace(7) %ptr) {
; ALL-LABEL: conv_generic_to_shared_cluster
define i32 @conv_generic_to_shared_cluster(ptr %ptr) {
; CLS32: cvta.to.shared::cluster.u32
-; CLS64: cvta.to.shared::cluster.u64
-; PTRCONV: cvt.u32.u64
-; NOPTRCONV-NOT: cvt.u32.u64
-; ALL: ld.shared::cluster.u32
+; NOPTRCONV-LABEL: conv_generic_to_shared_cluster(
+; NOPTRCONV: {
+; NOPTRCONV-NEXT: .reg .b32 %r<2>;
+; NOPTRCONV-NEXT: .reg .b64 %rd<3>;
+; NOPTRCONV-EMPTY:
+; NOPTRCONV-NEXT: // %bb.0:
+; NOPTRCONV-NEXT: ld.param.u64 %rd1, [conv_generic_to_shared_cluster_param_0];
+; NOPTRCONV-NEXT: cvta.to.shared::cluster.u64 %rd2, %rd1;
+; NOPTRCONV-NEXT: ld.shared::cluster.u32 %r1, [%rd2];
+; NOPTRCONV-NEXT: st.param.b32 [func_retval0], %r1;
+; NOPTRCONV-NEXT: ret;
+;
+; PTRCONV-LABEL: conv_generic_to_shared_cluster(
+; PTRCONV: {
+; PTRCONV-NEXT: .reg .b32 %r<3>;
+; PTRCONV-NEXT: .reg .b64 %rd<3>;
+; PTRCONV-EMPTY:
+; PTRCONV-NEXT: // %bb.0:
+; PTRCONV-NEXT: ld.param.u64 %rd1, [conv_generic_to_shared_cluster_param_0];
+; PTRCONV-NEXT: cvta.to.shared::cluster.u64 %rd2, %rd1;
+; PTRCONV-NEXT: cvt.u32.u64 %r1, %rd2;
+; PTRCONV-NEXT: ld.shared::cluster.u32 %r2, [%r1];
+; PTRCONV-NEXT: st.param.b32 [func_retval0], %r2;
+; PTRCONV-NEXT: ret;
%specptr = addrspacecast ptr %ptr to ptr addrspace(7)
%val = load i32, ptr addrspace(7) %specptr
ret i32 %val
@@ -29,9 +70,33 @@ define i32 @conv_generic_to_shared_cluster(ptr %ptr) {
; ALL-LABEL: conv_shared_to_shared_cluster
define i32 @conv_shared_to_shared_cluster(ptr addrspace(3) %ptr) {
-; CLS64: cvta.shared.u64
-; CLS64: cvta.to.shared::cluster.u64
-; ALL: ld.shared::cluster.u32
+; NOPTRCONV-LABEL: conv_shared_to_shared_cluster(
+; NOPTRCONV: {
+; NOPTRCONV-NEXT: .reg .b32 %r<2>;
+; NOPTRCONV-NEXT: .reg .b64 %rd<4>;
+; NOPTRCONV-EMPTY:
+; NOPTRCONV-NEXT: // %bb.0:
+; NOPTRCONV-NEXT: ld.param.u64 %rd1, [conv_shared_to_shared_cluster_param_0];
+; NOPTRCONV-NEXT: cvta.shared.u64 %rd2, %rd1;
+; NOPTRCONV-NEXT: cvta.to.shared::cluster.u64 %rd3, %rd2;
+; NOPTRCONV-NEXT: ld.shared::cluster.u32 %r1, [%rd3];
+; NOPTRCONV-NEXT: st.param.b32 [func_retval0], %r1;
+; NOPTRCONV-NEXT: ret;
+;
+; PTRCONV-LABEL: conv_shared_to_shared_cluster(
+; PTRCONV: {
+; PTRCONV-NEXT: .reg .b32 %r<4>;
+; PTRCONV-NEXT: .reg .b64 %rd<4>;
+; PTRCONV-EMPTY:
+; PTRCONV-NEXT: // %bb.0:
+; PTRCONV-NEXT: ld.param.u32 %r1, [conv_shared_to_shared_cluster_param_0];
+; PTRCONV-NEXT: cvt.u64.u32 %rd1, %r1;
+; PTRCONV-NEXT: cvta.shared.u64 %rd2, %rd1;
+; PTRCONV-NEXT: cvta.to.shared::cluster.u64 %rd3, %rd2;
+; PTRCONV-NEXT: cvt.u32.u64 %r2, %rd3;
+; PTRCONV-NEXT: ld.shared::cluster.u32 %r3, [%r2];
+; PTRCONV-NEXT: st.param.b32 [func_retval0], %r3;
+; PTRCONV-NEXT: ret;
%specptr = addrspacecast ptr addrspace(3) %ptr to ptr addrspace(7)
%val = load i32, ptr addrspace(7) %specptr
ret i32 %val
@@ -39,10 +104,34 @@ define i32 @conv_shared_to_shared_cluster(ptr addrspace(3) %ptr) {
; ALL-LABEL: conv_shared_cluster_to_shared
define i32 @conv_shared_cluster_to_shared(ptr addrspace(7) %ptr) {
-; CLS64: cvta.shared::cluster.u64
-; CLS64: cvta.to.shared.u64
-; ALL: ld.shared.u32
+; NOPTRCONV-LABEL: conv_shared_cluster_to_shared(
+; NOPTRCONV: {
+; NOPTRCONV-NEXT: .reg .b32 %r<2>;
+; NOPTRCONV-NEXT: .reg .b64 %rd<4>;
+; NOPTRCONV-EMPTY:
+; NOPTRCONV-NEXT: // %bb.0:
+; NOPTRCONV-NEXT: ld.param.u64 %rd1, [conv_shared_cluster_to_shared_param_0];
+; NOPTRCONV-NEXT: cvta.shared::cluster.u64 %rd2, %rd1;
+; NOPTRCONV-NEXT: cvta.to.shared.u64 %rd3, %rd2;
+; NOPTRCONV-NEXT: ld.shared.u32 %r1, [%rd3];
+; NOPTRCONV-NEXT: st.param.b32 [func_retval0], %r1;
+; NOPTRCONV-NEXT: ret;
+;
+; PTRCONV-LABEL: conv_shared_cluster_to_shared(
+; PTRCONV: {
+; PTRCONV-NEXT: .reg .b32 %r<4>;
+; PTRCONV-NEXT: .reg .b64 %rd<4>;
+; PTRCONV-EMPTY:
+; PTRCONV-NEXT: // %bb.0:
+; PTRCONV-NEXT: ld.param.u32 %r1, [conv_shared_cluster_to_shared_param_0];
+; PTRCONV-NEXT: cvt.u64.u32 %rd1, %r1;
+; PTRCONV-NEXT: cvta.shared::cluster.u64 %rd2, %rd1;
+; PTRCONV-NEXT: cvta.to.shared.u64 %rd3, %rd2;
+; PTRCONV-NEXT: cvt.u32.u64 %r2, %rd3;
+; PTRCONV-NEXT: ld.shared.u32 %r3, [%r2];
+; PTRCONV-NEXT: st.param.b32 [func_retval0], %r3;
+; PTRCONV-NEXT: ret;
%specptr = addrspacecast ptr addrspace(7) %ptr to ptr addrspace(3)
%val = load i32, ptr addrspace(3) %specptr
ret i32 %val
-}
\ No newline at end of file
+}
diff --git a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
index 627065ef8fcfc..c8478029e3990 100644
--- a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
+++ b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
@@ -4,14 +4,6 @@
target triple = "nvptx64-nvidia-cuda"
- at llvm.used = appending global [5 x ptr] [
- ptr @test_distributed_shared_cluster_common,
- ptr @test_distributed_shared_cluster_float_atomic,
- ptr @test_distributed_shared_cluster_int_atomic,
- ptr @test_distributed_shared_cluster_bitwise_atomic,
- ptr @test_distributed_shared_cluster_cmpxchg
-], section "llvm.metadata"
-
declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)
declare i1 @llvm.nvvm.isspacep.shared.cluster(ptr)
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
diff --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/isspacep.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/isspacep.ll
index 348fa688770df..26b08848529a7 100644
--- a/llvm/test/Transforms/InferAddressSpaces/NVPTX/isspacep.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/NVPTX/isspacep.ll
@@ -114,17 +114,14 @@ entry:
ret i1 %val
}
-define i1 @test_isspacep_cluster_shared_unsure(ptr addrspace(3) %addr) {
-; CHECK-LABEL: define i1 @test_isspacep_cluster_shared_unsure(
-; CHECK-SAME: ptr addrspace(3) [[ADDR:%.*]]) {
+define i1 @test_isspacep_cluster_shared_true(ptr addrspace(7) %addr) {
+; CHECK-LABEL: define i1 @test_isspacep_cluster_shared_true(
+; CHECK-SAME: ptr addrspace(7) [[ADDR:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[ADDR1:%.*]] = getelementptr i8, ptr addrspace(3) [[ADDR]], i32 10
-; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr addrspace(3) [[ADDR1]] to ptr
-; CHECK-NEXT: [[VAL:%.*]] = call i1 @llvm.nvvm.isspacep.shared.cluster(ptr [[TMP0]])
-; CHECK-NEXT: ret i1 [[VAL]]
+; CHECK-NEXT: ret i1 true
;
entry:
- %addr0 = addrspacecast ptr addrspace(3) %addr to ptr
+ %addr0 = addrspacecast ptr addrspace(7) %addr to ptr
%addr1 = getelementptr i8, ptr %addr0, i32 10
%val = call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %addr1)
ret i1 %val
>From b72bd0bd12b241e319e27460502066f1b0492227 Mon Sep 17 00:00:00 2001
From: Modi Mo <mmo at nvidia.com>
Date: Mon, 21 Apr 2025 15:37:35 -0700
Subject: [PATCH 6/9] more review feedback
---
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 9 +++--
llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 6 +--
llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp | 4 +-
.../Target/NVPTX/NVPTXTargetTransformInfo.cpp | 6 ++-
.../InferAddressSpaces/NVPTX/isspacep.ll | 39 +++++++++++++++++--
5 files changed, 50 insertions(+), 14 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 58bb093e6ec90..18baf1f338023 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -3050,13 +3050,14 @@ SDValue NVPTXTargetLowering::LowerADDRSPACECAST(SDValue Op,
DestAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER) ||
(SrcAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER &&
DestAS == llvm::ADDRESS_SPACE_SHARED)) {
+ SDLoc DL(Op.getNode());
const MVT GenerictVT =
getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_GENERIC);
- const MVT OutputVT = getPointerTy(DAG.getDataLayout(), DestAS);
SDValue GenericConversion = DAG.getAddrSpaceCast(
- SDLoc(), GenerictVT, Op.getOperand(0), SrcAS, ADDRESS_SPACE_GENERIC);
- SDValue SharedClusterConversion = DAG.getAddrSpaceCast(
- SDLoc(), OutputVT, GenericConversion, ADDRESS_SPACE_GENERIC, DestAS);
+ DL, GenerictVT, Op.getOperand(0), SrcAS, ADDRESS_SPACE_GENERIC);
+ SDValue SharedClusterConversion =
+ DAG.getAddrSpaceCast(DL, Op.getValueType(), GenericConversion,
+ ADDRESS_SPACE_GENERIC, DestAS);
return SharedClusterConversion;
}
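
For reference, the lowering above handles a cast for which PTX has no single cvta form: a conversion between CTA-shared (addrspace 3) and cluster-shared (addrspace 7) is expanded into two casts that round-trip through the generic space. A minimal IR sketch of the pattern it covers (illustrative only, not part of the patch; the conv_shared_to_shared_cluster test in addrspacecast-ptx64.ll checks the resulting cvta.shared.u64 / cvta.to.shared::cluster.u64 pair):

define ptr addrspace(7) @cta_to_cluster(ptr addrspace(3) %p) {
  ; Lowered as cvta.shared.u64 followed by cvta.to.shared::cluster.u64.
  %c = addrspacecast ptr addrspace(3) %p to ptr addrspace(7)
  ret ptr addrspace(7) %c
}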
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 0b2c04ee3b729..dc29045fca43d 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -2416,10 +2416,8 @@ foreach space = ["local", "shared", "global", "const", "param"] in {
defm cvta_to_#space : G_TO_NG<space>;
}
-foreach space = ["shared::cluster"] in {
- defm cvta_#space : NG_TO_G<space, false, [hasclusters]>;
- defm cvta_to_#space : G_TO_NG<space, false, [hasclusters]>;
-}
+defm cvta_shared_cluster : NG_TO_G<"shared::cluster", false, [hasClusters]>;
+defm cvta_to_shared_cluster : G_TO_NG<"shared::cluster", false, [hasClusters]>;
def : Pat<(int_nvvm_ptr_param_to_gen i32:$src),
(cvta_param $src)>;
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 88896c8f407ad..1a7b20390a562 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -282,8 +282,10 @@ NVPTXTargetMachine::getPredicatedAddrSpace(const Value *V) const {
case Intrinsic::nvvm_isspacep_local:
return std::make_pair(II->getArgOperand(0), llvm::ADDRESS_SPACE_LOCAL);
case Intrinsic::nvvm_isspacep_shared:
- case Intrinsic::nvvm_isspacep_shared_cluster:
return std::make_pair(II->getArgOperand(0), llvm::ADDRESS_SPACE_SHARED);
+ case Intrinsic::nvvm_isspacep_shared_cluster:
+ return std::make_pair(II->getArgOperand(0),
+ llvm::ADDRESS_SPACE_SHARED_CLUSTER);
default:
break;
}
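
For context, getPredicatedAddrSpace feeds InferAddressSpaces: a pointer whose uses are dominated by a true isspacep predicate may be assumed to live in the returned space. A rough sketch of the kind of IR this change affects, assuming the llvm.assume-guarded pattern that InferAddressSpaces recognizes (illustrative only, not part of the patch):

declare i1 @llvm.nvvm.isspacep.shared.cluster(ptr)
declare void @llvm.assume(i1)

define i32 @predicated_use(ptr %p) {
  %is = call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %p)
  call void @llvm.assume(i1 %is)
  ; With this change the assumed pointer is predicated as addrspace(7)
  ; rather than addrspace(3), so the pass may rewrite the load to the
  ; shared::cluster space.
  %v = load i32, ptr %p
  ret i32 %v
}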
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index e662728ae9295..e0c946290adf2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -424,9 +424,13 @@ static std::optional<bool> evaluateIsSpace(Intrinsic::ID IID, unsigned AS) {
case Intrinsic::nvvm_isspacep_local:
return AS == NVPTXAS::ADDRESS_SPACE_LOCAL;
case Intrinsic::nvvm_isspacep_shared:
+    // If the address space is shared_cluster, this cannot be evaluated at
+    // compile time.
+ if (AS == NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER)
+ return std::nullopt;
return AS == NVPTXAS::ADDRESS_SPACE_SHARED;
case Intrinsic::nvvm_isspacep_shared_cluster:
- return AS == NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER;
+ return AS == NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER ||
+ AS == NVPTXAS::ADDRESS_SPACE_SHARED;
case Intrinsic::nvvm_isspacep_const:
return AS == NVPTXAS::ADDRESS_SPACE_CONST;
default:
diff --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/isspacep.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/isspacep.ll
index 26b08848529a7..d05e106d81342 100644
--- a/llvm/test/Transforms/InferAddressSpaces/NVPTX/isspacep.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/NVPTX/isspacep.ll
@@ -114,8 +114,8 @@ entry:
ret i1 %val
}
-define i1 @test_isspacep_cluster_shared_true(ptr addrspace(7) %addr) {
-; CHECK-LABEL: define i1 @test_isspacep_cluster_shared_true(
+define i1 @test_isspacep_shared_cluster_true(ptr addrspace(7) %addr) {
+; CHECK-LABEL: define i1 @test_isspacep_shared_cluster_true(
; CHECK-SAME: ptr addrspace(7) [[ADDR:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret i1 true
@@ -127,8 +127,8 @@ entry:
ret i1 %val
}
-define i1 @test_isspacep_cluster_shared_false(ptr addrspace(1) %addr) {
-; CHECK-LABEL: define i1 @test_isspacep_cluster_shared_false(
+define i1 @test_isspacep_shared_cluster_false(ptr addrspace(1) %addr) {
+; CHECK-LABEL: define i1 @test_isspacep_shared_cluster_false(
; CHECK-SAME: ptr addrspace(1) [[ADDR:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: ret i1 false
@@ -139,3 +139,34 @@ entry:
%val = call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %addr1)
ret i1 %val
}
+
+; isspacep_shared_cluster returns true for shared
+define i1 @test_isspacep_cluster_shared_shared(ptr addrspace(3) %addr) {
+; CHECK-LABEL: define i1 @test_isspacep_cluster_shared_shared(
+; CHECK-SAME: ptr addrspace(3) [[ADDR:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: ret i1 true
+;
+entry:
+ %addr0 = addrspacecast ptr addrspace(3) %addr to ptr
+ %addr1 = getelementptr i8, ptr %addr0, i32 10
+ %val = call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %addr1)
+ ret i1 %val
+}
+
+; shared cluster cannot be evaluated to shared at compile time
+define i1 @test_isspacep_shared_shared_cluster(ptr addrspace(7) %addr) {
+; CHECK-LABEL: define i1 @test_isspacep_shared_shared_cluster(
+; CHECK-SAME: ptr addrspace(7) [[ADDR:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[ADDR2:%.*]] = getelementptr i8, ptr addrspace(7) [[ADDR]], i32 10
+; CHECK-NEXT: [[ADDR1:%.*]] = addrspacecast ptr addrspace(7) [[ADDR2]] to ptr
+; CHECK-NEXT: [[VAL:%.*]] = call i1 @llvm.nvvm.isspacep.shared(ptr [[ADDR1]])
+; CHECK-NEXT: ret i1 [[VAL]]
+;
+entry:
+ %addr0 = addrspacecast ptr addrspace(7) %addr to ptr
+ %addr1 = getelementptr i8, ptr %addr0, i32 10
+ %val = call i1 @llvm.nvvm.isspacep.shared(ptr %addr1)
+ ret i1 %val
+}
>From 1d29c8479e5cc3fcba1d1dce3b81918c27a331c8 Mon Sep 17 00:00:00 2001
From: Modi Mo <mmo at nvidia.com>
Date: Mon, 21 Apr 2025 22:57:49 -0700
Subject: [PATCH 7/9] for -> if on td definition
---
llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 30 ++++++++++--------------
1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index dc29045fca43d..a6595e512dbae 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -2388,27 +2388,21 @@ def INT_PTX_LDG_G_v4f32_ELE : VLDG_G_ELE_V4<"f32", Float32Regs>;
multiclass NG_TO_G<string Str, bit Supports32 = 1, list<Predicate> Preds = []> {
- foreach bitwidth = !if(Supports32, ["32", "64"], ["64"]) in {
- if !eq(bitwidth, "32") then {
- def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
- "cvta." # Str # ".u32 \t$result, $src;", []>, Requires<Preds>;
- } else if !eq(bitwidth, "64") then {
- def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
- "cvta." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
- }
- }
+ if Supports32 then
+ def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
+ "cvta." # Str # ".u32 \t$result, $src;", []>, Requires<Preds>;
+
+ def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ "cvta." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
}
multiclass G_TO_NG<string Str, bit Supports32 = 1, list<Predicate> Preds = []> {
- foreach bitwidth = !if(Supports32, ["32", "64"], ["64"]) in {
- if !eq(bitwidth, "32") then {
- def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
- "cvta.to." # Str # ".u32 \t$result, $src;", []>, Requires<Preds>;
- } else if !eq(bitwidth, "64") then {
- def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
- "cvta.to." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
- }
- }
+ if Supports32 then
+ def "" : NVPTXInst<(outs Int32Regs:$result), (ins Int32Regs:$src),
+ "cvta.to." # Str # ".u32 \t$result, $src;", []>, Requires<Preds>;
+
+ def _64 : NVPTXInst<(outs Int64Regs:$result), (ins Int64Regs:$src),
+ "cvta.to." # Str # ".u64 \t$result, $src;", []>, Requires<Preds>;
}
foreach space = ["local", "shared", "global", "const", "param"] in {
>From c0637c6a4310d8fb8d12f7d53c2e53cfc8f8738d Mon Sep 17 00:00:00 2001
From: Modi Mo <mmo at nvidia.com>
Date: Tue, 22 Apr 2025 14:00:12 -0700
Subject: [PATCH 8/9] factor out intrinsics update change to another PR
---
.../NVPTX/distributed-shared-cluster.ll | 100 +++++-------------
1 file changed, 25 insertions(+), 75 deletions(-)
diff --git a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
index c8478029e3990..8b6c554aeb9f2 100644
--- a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
+++ b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
@@ -4,56 +4,6 @@
target triple = "nvptx64-nvidia-cuda"
-declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)
-declare i1 @llvm.nvvm.isspacep.shared.cluster(ptr)
-declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
-declare ptr @llvm.nvvm.mapa(ptr, i32)
-
-; Common setup for distributed shared memory cluster addressing
-define i32 @test_distributed_shared_cluster_common(ptr %ptr, ptr addrspace(3) %smem_ptr) local_unnamed_addr {
-; CHECK-LABEL: test_distributed_shared_cluster_common(
-; CHECK: {
-; CHECK-NEXT: .reg .pred %p<3>;
-; CHECK-NEXT: .reg .b32 %r<8>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0: // %entry
-; CHECK-NEXT: ld.param.u64 %rd1, [test_distributed_shared_cluster_common_param_0];
-; CHECK-NEXT: ld.param.u64 %rd2, [test_distributed_shared_cluster_common_param_1];
-; CHECK-NEXT: mov.u32 %r1, %ctaid.x;
-; CHECK-NEXT: xor.b32 %r2, %r1, 1;
-; CHECK-NEXT: isspacep.shared::cluster %p1, %rd1;
-; CHECK-NEXT: mapa.u64 %rd3, %rd1, %r2;
-; CHECK-NEXT: isspacep.shared::cluster %p2, %rd3;
-; CHECK-NEXT: mapa.shared::cluster.u64 %rd4, %rd2, %r2;
-; CHECK-NEXT: ld.shared::cluster.u32 %r3, [%rd4];
-; CHECK-NEXT: add.s32 %r4, %r3, 42;
-; CHECK-NEXT: st.shared::cluster.u32 [%rd4], %r4;
-; CHECK-NEXT: selp.b32 %r5, 1, 0, %p1;
-; CHECK-NEXT: selp.b32 %r6, 1, 0, %p2;
-; CHECK-NEXT: add.s32 %r7, %r5, %r6;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r7;
-; CHECK-NEXT: ret;
-entry:
- %0 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
- %1 = xor i32 %0, 1
- %2 = tail call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %ptr)
- %3 = tail call ptr @llvm.nvvm.mapa(ptr %ptr, i32 %1)
- %4 = tail call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %3)
- %dsmem_ptr = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %smem_ptr, i32 %1)
-
- ; Add load and store to the distributed shared memory cluster
- %loaded_val = load i32, ptr addrspace(7) %dsmem_ptr
- %updated_val = add i32 %loaded_val, 42
- store i32 %updated_val, ptr addrspace(7) %dsmem_ptr
-
- ; Return value preserves the isspacep test results plus the value operation
- %5 = zext i1 %2 to i32
- %6 = zext i1 %4 to i32
- %ret = add i32 %5, %6
- ret i32 %ret
-}
-
; Floating point atomic operations tests
define void @test_distributed_shared_cluster_float_atomic(ptr addrspace(7) %dsmem_ptr) local_unnamed_addr {
; CHECK-LABEL: test_distributed_shared_cluster_float_atomic(
@@ -207,84 +157,84 @@ define void @test_distributed_shared_cluster_cmpxchg(ptr addrspace(7) %dsmem_ptr
; CHECK-NEXT: shl.b32 %r3, %r37, %r1;
; CHECK-NEXT: ld.shared::cluster.u32 %r38, [%rd1];
; CHECK-NEXT: and.b32 %r48, %r38, %r2;
-; CHECK-NEXT: $L__BB4_1: // %partword.cmpxchg.loop33
+; CHECK-NEXT: $L__BB3_1: // %partword.cmpxchg.loop33
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: or.b32 %r39, %r48, %r3;
; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r6, [%rd1], %r39, %r48;
; CHECK-NEXT: setp.eq.s32 %p1, %r6, %r39;
-; CHECK-NEXT: @%p1 bra $L__BB4_3;
+; CHECK-NEXT: @%p1 bra $L__BB3_3;
; CHECK-NEXT: // %bb.2: // %partword.cmpxchg.failure32
-; CHECK-NEXT: // in Loop: Header=BB4_1 Depth=1
+; CHECK-NEXT: // in Loop: Header=BB3_1 Depth=1
; CHECK-NEXT: and.b32 %r7, %r6, %r2;
; CHECK-NEXT: setp.ne.s32 %p2, %r48, %r7;
; CHECK-NEXT: mov.b32 %r48, %r7;
-; CHECK-NEXT: @%p2 bra $L__BB4_1;
-; CHECK-NEXT: $L__BB4_3: // %partword.cmpxchg.end31
+; CHECK-NEXT: @%p2 bra $L__BB3_1;
+; CHECK-NEXT: $L__BB3_3: // %partword.cmpxchg.end31
; CHECK-NEXT: ld.shared::cluster.u32 %r40, [%rd1];
; CHECK-NEXT: and.b32 %r49, %r40, %r2;
-; CHECK-NEXT: $L__BB4_4: // %partword.cmpxchg.loop23
+; CHECK-NEXT: $L__BB3_4: // %partword.cmpxchg.loop23
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: or.b32 %r41, %r49, %r3;
; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r10, [%rd1], %r41, %r49;
; CHECK-NEXT: setp.eq.s32 %p3, %r10, %r41;
-; CHECK-NEXT: @%p3 bra $L__BB4_6;
+; CHECK-NEXT: @%p3 bra $L__BB3_6;
; CHECK-NEXT: // %bb.5: // %partword.cmpxchg.failure22
-; CHECK-NEXT: // in Loop: Header=BB4_4 Depth=1
+; CHECK-NEXT: // in Loop: Header=BB3_4 Depth=1
; CHECK-NEXT: and.b32 %r11, %r10, %r2;
; CHECK-NEXT: setp.ne.s32 %p4, %r49, %r11;
; CHECK-NEXT: mov.b32 %r49, %r11;
-; CHECK-NEXT: @%p4 bra $L__BB4_4;
-; CHECK-NEXT: $L__BB4_6: // %partword.cmpxchg.end21
+; CHECK-NEXT: @%p4 bra $L__BB3_4;
+; CHECK-NEXT: $L__BB3_6: // %partword.cmpxchg.end21
; CHECK-NEXT: fence.acq_rel.sys;
; CHECK-NEXT: fence.acq_rel.sys;
; CHECK-NEXT: ld.shared::cluster.u32 %r42, [%rd1];
; CHECK-NEXT: and.b32 %r50, %r42, %r2;
-; CHECK-NEXT: $L__BB4_7: // %partword.cmpxchg.loop13
+; CHECK-NEXT: $L__BB3_7: // %partword.cmpxchg.loop13
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: or.b32 %r43, %r50, %r3;
; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r14, [%rd1], %r43, %r50;
; CHECK-NEXT: setp.eq.s32 %p5, %r14, %r43;
-; CHECK-NEXT: @%p5 bra $L__BB4_9;
+; CHECK-NEXT: @%p5 bra $L__BB3_9;
; CHECK-NEXT: // %bb.8: // %partword.cmpxchg.failure12
-; CHECK-NEXT: // in Loop: Header=BB4_7 Depth=1
+; CHECK-NEXT: // in Loop: Header=BB3_7 Depth=1
; CHECK-NEXT: and.b32 %r15, %r14, %r2;
; CHECK-NEXT: setp.ne.s32 %p6, %r50, %r15;
; CHECK-NEXT: mov.b32 %r50, %r15;
-; CHECK-NEXT: @%p6 bra $L__BB4_7;
-; CHECK-NEXT: $L__BB4_9: // %partword.cmpxchg.end11
+; CHECK-NEXT: @%p6 bra $L__BB3_7;
+; CHECK-NEXT: $L__BB3_9: // %partword.cmpxchg.end11
; CHECK-NEXT: fence.acq_rel.sys;
; CHECK-NEXT: ld.shared::cluster.u32 %r44, [%rd1];
; CHECK-NEXT: and.b32 %r51, %r44, %r2;
-; CHECK-NEXT: $L__BB4_10: // %partword.cmpxchg.loop3
+; CHECK-NEXT: $L__BB3_10: // %partword.cmpxchg.loop3
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: or.b32 %r45, %r51, %r3;
; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r18, [%rd1], %r45, %r51;
; CHECK-NEXT: setp.eq.s32 %p7, %r18, %r45;
-; CHECK-NEXT: @%p7 bra $L__BB4_12;
+; CHECK-NEXT: @%p7 bra $L__BB3_12;
; CHECK-NEXT: // %bb.11: // %partword.cmpxchg.failure2
-; CHECK-NEXT: // in Loop: Header=BB4_10 Depth=1
+; CHECK-NEXT: // in Loop: Header=BB3_10 Depth=1
; CHECK-NEXT: and.b32 %r19, %r18, %r2;
; CHECK-NEXT: setp.ne.s32 %p8, %r51, %r19;
; CHECK-NEXT: mov.b32 %r51, %r19;
-; CHECK-NEXT: @%p8 bra $L__BB4_10;
-; CHECK-NEXT: $L__BB4_12: // %partword.cmpxchg.end1
+; CHECK-NEXT: @%p8 bra $L__BB3_10;
+; CHECK-NEXT: $L__BB3_12: // %partword.cmpxchg.end1
; CHECK-NEXT: fence.acq_rel.sys;
; CHECK-NEXT: fence.sc.sys;
; CHECK-NEXT: ld.shared::cluster.u32 %r46, [%rd1];
; CHECK-NEXT: and.b32 %r52, %r46, %r2;
-; CHECK-NEXT: $L__BB4_13: // %partword.cmpxchg.loop
+; CHECK-NEXT: $L__BB3_13: // %partword.cmpxchg.loop
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: or.b32 %r47, %r52, %r3;
; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r22, [%rd1], %r47, %r52;
; CHECK-NEXT: setp.eq.s32 %p9, %r22, %r47;
-; CHECK-NEXT: @%p9 bra $L__BB4_15;
+; CHECK-NEXT: @%p9 bra $L__BB3_15;
; CHECK-NEXT: // %bb.14: // %partword.cmpxchg.failure
-; CHECK-NEXT: // in Loop: Header=BB4_13 Depth=1
+; CHECK-NEXT: // in Loop: Header=BB3_13 Depth=1
; CHECK-NEXT: and.b32 %r23, %r22, %r2;
; CHECK-NEXT: setp.ne.s32 %p10, %r52, %r23;
; CHECK-NEXT: mov.b32 %r52, %r23;
-; CHECK-NEXT: @%p10 bra $L__BB4_13;
-; CHECK-NEXT: $L__BB4_15: // %partword.cmpxchg.end
+; CHECK-NEXT: @%p10 bra $L__BB3_13;
+; CHECK-NEXT: $L__BB3_15: // %partword.cmpxchg.end
; CHECK-NEXT: fence.acq_rel.sys;
; CHECK-NEXT: ret;
entry:
>From 36c29a8b55034a3700072a042ba6c7df08a0a2b7 Mon Sep 17 00:00:00 2001
From: Modi Mo <mmo at nvidia.com>
Date: Tue, 22 Apr 2025 14:19:33 -0700
Subject: [PATCH 9/9] properly remove the existing intrinsics changes
---
clang/test/CodeGenCUDA/builtins-sm90.cu | 2 +-
llvm/docs/NVPTXUsage.rst | 33 +------
llvm/include/llvm/IR/IntrinsicsNVVM.td | 53 +++++-----
llvm/lib/IR/AutoUpgrade.cpp | 83 ----------------
.../Assembler/auto_upgrade_nvvm_intrinsics.ll | 57 -----------
.../CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll | 96 +++++++++----------
llvm/test/CodeGen/NVPTX/cp-async-bulk.ll | 18 ++--
.../include/mlir/Dialect/LLVMIR/NVVMDialect.h | 5 +-
mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td | 5 +-
.../Target/LLVMIR/nvvm/tma_bulk_copy.mlir | 24 ++---
10 files changed, 102 insertions(+), 274 deletions(-)
diff --git a/clang/test/CodeGenCUDA/builtins-sm90.cu b/clang/test/CodeGenCUDA/builtins-sm90.cu
index f4746df944536..a639c7716adb1 100644
--- a/clang/test/CodeGenCUDA/builtins-sm90.cu
+++ b/clang/test/CodeGenCUDA/builtins-sm90.cu
@@ -50,7 +50,7 @@ __attribute__((global)) void kernel(long *out, void *ptr, unsigned u) {
auto * sptr = (__attribute__((address_space(3))) void *)ptr;
// CHECK: call ptr @llvm.nvvm.mapa(ptr %{{.*}}, i32 %{{.*}})
out[i++] = (long) __nvvm_mapa(ptr, u);
- // CHECK: call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
+ // CHECK: call ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
out[i++] = (long) __nvvm_mapa_shared_cluster(sptr, u);
// CHECK: call i32 @llvm.nvvm.getctarank(ptr {{.*}})
out[i++] = __nvvm_getctarank(ptr);
diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst
index c1426823d87af..ef6e888286def 100644
--- a/llvm/docs/NVPTXUsage.rst
+++ b/llvm/docs/NVPTXUsage.rst
@@ -108,7 +108,6 @@ The NVPTX back-end uses the following address space mapping:
3 Shared
4 Constant
5 Local
- 7 Shared Cluster
============= ======================
Every global variable and pointer type is assigned to one of these address
@@ -307,32 +306,6 @@ If the given pointer in the generic address space refers to memory which falls
within the state space of the intrinsic (and therefore could be safely address
space casted to this space), 1 is returned, otherwise 0 is returned.
-'``llvm.nvvm.mapa.*``' Intrinsics
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Syntax:
-"""""""
-
-.. code-block:: llvm
-
- declare ptr @llvm.nvvm.mapa(ptr %p, i32 %rank)
- declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p, i32 %rank)
-
-Overview:
-"""""""""
-
-The '``llvm.nvvm.mapa.*``' intrinsics map a shared memory pointer ``p`` of another CTA with ``%rank`` to the current CTA.
-The ``llvm.nvvm.mapa`` form expects a generic pointer to shared memory and returns a generic pointer to shared cluster memory.
-The ``llvm.nvvm.mapa.shared.cluster`` form expects a pointer to shared memory and returns a pointer to shared cluster memory.
-They corresponds directly to the ``mapa`` and ``mapa.shared.cluster`` PTX instructions.
-
-Semantics:
-""""""""""
-
-If the given pointer in the generic address space refers to memory which falls
-within the state space of the intrinsic (and therefore could be safely address
-space casted to this space), 1 is returned, otherwise 0 is returned.
-
Arithmetic Intrinsics
---------------------
@@ -579,7 +552,7 @@ Syntax:
.. code-block:: llvm
- declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
+ declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
Overview:
"""""""""
@@ -643,7 +616,7 @@ Syntax:
.. code-block:: llvm
- declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(3) %src, i32 %size)
+ declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %mbar, ptr addrspace(3) %src, i32 %size)
Overview:
"""""""""
@@ -798,7 +771,7 @@ Syntax:
.. code-block:: llvm
- declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
+ declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(..., i32 %d0, i32 %d1, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(..., i32 %d0, i32 %d1, i32 %d2, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 94367be6ee8e2..d09e1da457249 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -127,11 +127,10 @@
// * llvm.nvvm.atomic.load.inc.32 --> atomicrmw uinc_wrap
// * llvm.nvvm.atomic.load.dec.32 --> atomicrmw udec_wrap
-def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
-def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
-def llvm_local_ptr_ty : LLVMQualPointerType<5>; // (local)ptr
-def llvm_tmem_ptr_ty : LLVMQualPointerType<6>; // (tensor memory)ptr
-def llvm_shared_cluster_ptr_ty : LLVMQualPointerType<7>; // (shared_cluster)ptr
+def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
+def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
+def llvm_local_ptr_ty : LLVMQualPointerType<5>; // (local)ptr
+def llvm_tmem_ptr_ty : LLVMQualPointerType<6>; // (tensor memory)ptr
//
// MISC
@@ -692,15 +691,15 @@ class CP_ASYNC_BULK_TENSOR_G2S_INTR<int dim, string mode> {
list<LLVMType> Im2ColOffsetsTy = !listsplat(llvm_i16_ty, NumIm2ColOffsets);
list<LLVMType> TensorDimsTy = !listsplat(llvm_i32_ty, dim);
list<LLVMType> ArgsTy = !listconcat(
- [llvm_shared_cluster_ptr_ty, // dst_shared_cluster_ptr
- llvm_shared_ptr_ty, // mbarrier_smem_ptr
- llvm_ptr_ty], // tensormap_ptr
- TensorDimsTy, // actual tensor dims
- Im2ColOffsetsTy, // im2col offsets
- [llvm_i16_ty, // cta_mask
- llvm_i64_ty, // cache_hint
- llvm_i1_ty, // Flag for cta_mask
- llvm_i1_ty] // Flag for cache_hint
+ [llvm_shared_ptr_ty, // dst_smem_ptr
+ llvm_shared_ptr_ty, // mbarrier_smem_ptr
+ llvm_ptr_ty], // tensormap_ptr
+ TensorDimsTy, // actual tensor dims
+ Im2ColOffsetsTy, // im2col offsets
+ [llvm_i16_ty, // cta_mask
+ llvm_i64_ty, // cache_hint
+ llvm_i1_ty, // Flag for cta_mask
+ llvm_i1_ty] // Flag for cache_hint
);
int TempFlagsStartIdx = !add(dim, 5);
@@ -5135,7 +5134,7 @@ def int_nvvm_mapa
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa">;
def int_nvvm_mapa_shared_cluster
- : DefaultAttrsIntrinsic<[llvm_shared_cluster_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
+ : DefaultAttrsIntrinsic<[llvm_shared_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa.shared.cluster">;
def int_nvvm_getctarank
@@ -5235,14 +5234,14 @@ def int_nvvm_discard_L2 : DefaultAttrsIntrinsic<[],
// From Global to Shared Cluster
def int_nvvm_cp_async_bulk_global_to_shared_cluster
: DefaultAttrsIntrinsic<[],
- [llvm_shared_cluster_ptr_ty, // dst_shared_cluster_ptr
- llvm_shared_ptr_ty, // mbarrier_ptr
- llvm_global_ptr_ty, // src_gmem_ptr
- llvm_i32_ty, // copy_size
- llvm_i16_ty, // cta_mask
- llvm_i64_ty, // cache_hint
- llvm_i1_ty, // Flag for cta_mask
- llvm_i1_ty], // Flag for cache_hint
+ [llvm_shared_ptr_ty, // dst_smem_ptr
+ llvm_shared_ptr_ty, // mbarrier_ptr
+ llvm_global_ptr_ty, // src_gmem_ptr
+ llvm_i32_ty, // copy_size
+ llvm_i16_ty, // cta_mask
+ llvm_i64_ty, // cache_hint
+ llvm_i1_ty, // Flag for cta_mask
+ llvm_i1_ty], // Flag for cache_hint
[IntrConvergent, IntrArgMemOnly,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<2>>,
NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
@@ -5252,10 +5251,10 @@ def int_nvvm_cp_async_bulk_global_to_shared_cluster
// From Shared CTA to Shared Cluster
def int_nvvm_cp_async_bulk_shared_cta_to_cluster
: DefaultAttrsIntrinsic<[],
- [llvm_shared_cluster_ptr_ty, // dst_shared_cluster_ptr
- llvm_shared_ptr_ty, // mbarrier_ptr
- llvm_shared_ptr_ty, // src_smem_ptr
- llvm_i32_ty], // copy_size
+ [llvm_shared_ptr_ty, // dst_smem_ptr
+ llvm_shared_ptr_ty, // mbarrier_ptr
+ llvm_shared_ptr_ty, // src_smem_ptr
+ llvm_i32_ty], // copy_size
[IntrConvergent, IntrArgMemOnly,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<2>>,
NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 01e9b61b38944..2e17b8ce5eb7b 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -46,7 +46,6 @@
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/NVPTXAddrSpace.h"
#include "llvm/Support/Regex.h"
#include "llvm/TargetParser/Triple.h"
#include <cstdint>
@@ -939,47 +938,6 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
return false; // No other 'arm.*', 'aarch64.*'.
}
-static Intrinsic::ID shouldUpgradeNVPTXSharedClusterIntrinsic(Function *F,
- StringRef Name) {
- if (Name.consume_front("mapa.shared.cluster"))
- if (F->getReturnType()->getPointerAddressSpace() ==
- NVPTXAS::ADDRESS_SPACE_SHARED)
- return Intrinsic::nvvm_mapa_shared_cluster;
-
- if (Name.consume_front("cp.async.bulk.")) {
- Intrinsic::ID ID =
- StringSwitch<Intrinsic::ID>(Name)
- .Case("global.to.shared.cluster",
- Intrinsic::nvvm_cp_async_bulk_global_to_shared_cluster)
- .Case("shared.cta.to.cluster",
- Intrinsic::nvvm_cp_async_bulk_shared_cta_to_cluster)
- .Case("tensor.g2s.im2col.3d",
- Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_3d)
- .Case("tensor.g2s.im2col.4d",
- Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_4d)
- .Case("tensor.g2s.im2col.5d",
- Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_5d)
- .Case("tensor.g2s.tile.1d",
- Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_1d)
- .Case("tensor.g2s.tile.2d",
- Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_2d)
- .Case("tensor.g2s.tile.3d",
- Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_3d)
- .Case("tensor.g2s.tile.4d",
- Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_4d)
- .Case("tensor.g2s.tile.5d",
- Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_5d)
- .Default(Intrinsic::not_intrinsic);
-
- if (ID != Intrinsic::not_intrinsic)
- if (F->getArg(0)->getType()->getPointerAddressSpace() ==
- NVPTXAS::ADDRESS_SPACE_SHARED)
- return ID;
- }
-
- return Intrinsic::not_intrinsic;
-}
-
static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
if (Name.consume_front("fma.rn."))
return StringSwitch<Intrinsic::ID>(Name)
@@ -1320,14 +1278,6 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
}
}
- // Upgrade Distributed Shared Memory Intrinsics
- Intrinsic::ID IID = shouldUpgradeNVPTXSharedClusterIntrinsic(F, Name);
- if (IID != Intrinsic::not_intrinsic) {
- rename(F);
- NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID);
- return true;
- }
-
// The following nvvm intrinsics correspond exactly to an LLVM idiom, but
// not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
//
@@ -4768,39 +4718,6 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
CI->eraseFromParent();
return;
}
- case Intrinsic::nvvm_mapa_shared_cluster: {
- // Create a new call with the correct address space.
- NewCall =
- Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1)});
- Value *Res = NewCall;
- Res = Builder.CreateAddrSpaceCast(
- Res, Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_SHARED));
- NewCall->takeName(CI);
- CI->replaceAllUsesWith(Res);
- CI->eraseFromParent();
- return;
- }
- case Intrinsic::nvvm_cp_async_bulk_global_to_shared_cluster:
- case Intrinsic::nvvm_cp_async_bulk_shared_cta_to_cluster:
- case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_3d:
- case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_4d:
- case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_5d:
- case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_1d:
- case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_2d:
- case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_3d:
- case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_4d:
- case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_5d: {
- // Create a new call with the correct address space.
- SmallVector<Value *, 4> Args(CI->args());
- Args[0] = Builder.CreateAddrSpaceCast(
- Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER));
-
- NewCall = Builder.CreateCall(NewFn, Args);
- NewCall->takeName(CI);
- CI->replaceAllUsesWith(NewCall);
- CI->eraseFromParent();
- return;
- }
case Intrinsic::riscv_sha256sig0:
case Intrinsic::riscv_sha256sig1:
case Intrinsic::riscv_sha256sum0:
diff --git a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
index 9b1dd7088f1dd..a3ddc62dd34e9 100644
--- a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
+++ b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
@@ -59,21 +59,6 @@ declare float @llvm.nvvm.ldg.global.f.f32.p0(ptr, i32)
declare i32 @llvm.nvvm.atomic.load.inc.32(ptr, i32)
declare i32 @llvm.nvvm.atomic.load.dec.32(ptr, i32)
-declare ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)
-
-declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(3), i32)
-
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-
; CHECK-LABEL: @simple_upgrade
define void @simple_upgrade(i32 %a, i64 %b, i16 %c) {
; CHECK: call i32 @llvm.bitreverse.i32(i32 %a)
@@ -269,45 +254,3 @@ define i32 @atomics(ptr %p0, i32 %a) {
ret i32 %r2
}
-; CHECK-LABEL: @nvvm_shared_cluster_intrinsics
-define void @nvvm_shared_cluster_intrinsics(ptr addrspace(3) %p0, i32 %offset) {
-; CHECK: %r = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p0, i32 %offset)
- %r = call ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p0, i32 %offset)
- ret void
-}
-
-; CHECK-LABEL: @nvvm_cp_async_bulk_intrinsics
-define void @nvvm_cp_async_bulk_intrinsics(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, ptr addrspace(3) %src_shared, i32 %size) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %1, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
- call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
- ret void
-}
-
-; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_im2col
-define void @nvvm_cp_async_bulk_tensor_g2s_im2col(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %1, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %3, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 0, i64 0, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 0, i64 0, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 0, i64 0, i1 0, i1 0)
- ret void
-}
-
-; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_tile
-define void @nvvm_cp_async_bulk_tensor_g2s_tile(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %1, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %3, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %5, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 0, i64 0, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 0, i64 0, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 0, i64 0, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 0, i64 0, i1 0, i1 0)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 0, i64 0, i1 0, i1 0)
- ret void
-}
-
diff --git a/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll b/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll
index 530a896642b89..521dd567f837e 100644
--- a/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll
+++ b/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll
@@ -6,18 +6,18 @@
target triple = "nvptx64-nvidia-cuda"
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_1d
-define void @cp_async_bulk_tensor_g2s_tile_1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_1d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -55,18 +55,18 @@ define void @cp_async_bulk_tensor_g2s_tile_1d(ptr addrspace(7) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 undef, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 undef, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_2d
-define void @cp_async_bulk_tensor_g2s_tile_2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_2d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -106,18 +106,18 @@ define void @cp_async_bulk_tensor_g2s_tile_2d(ptr addrspace(7) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 undef, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 undef, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_3d
-define void @cp_async_bulk_tensor_g2s_tile_3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_3d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -159,18 +159,18 @@ define void @cp_async_bulk_tensor_g2s_tile_3d(ptr addrspace(7) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_4d
-define void @cp_async_bulk_tensor_g2s_tile_4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_4d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -214,18 +214,18 @@ define void @cp_async_bulk_tensor_g2s_tile_4d(ptr addrspace(7) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_5d
-define void @cp_async_bulk_tensor_g2s_tile_5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_5d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -271,18 +271,18 @@ define void @cp_async_bulk_tensor_g2s_tile_5d(ptr addrspace(7) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_im2col_3d
-define void @cp_async_bulk_tensor_g2s_im2col_3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_im2col_3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_im2col_3d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<3>;
@@ -326,18 +326,18 @@ define void @cp_async_bulk_tensor_g2s_im2col_3d(ptr addrspace(7) %d, ptr addrspa
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], {%rs1}, %rs2;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], {%rs1}, %rs2, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_im2col_4d
-define void @cp_async_bulk_tensor_g2s_im2col_4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_im2col_4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_im2col_4d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<4>;
@@ -385,18 +385,18 @@ define void @cp_async_bulk_tensor_g2s_im2col_4d(ptr addrspace(7) %d, ptr addrspa
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], {%rs1, %rs2}, %rs3;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], {%rs1, %rs2}, %rs3, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_im2col_5d
-define void @cp_async_bulk_tensor_g2s_im2col_5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_im2col_5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_im2col_5d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<5>;
@@ -448,12 +448,12 @@ define void @cp_async_bulk_tensor_g2s_im2col_5d(ptr addrspace(7) %d, ptr addrspa
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], {%rs1, %rs2, %rs3}, %rs4;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], {%rs1, %rs2, %rs3}, %rs4, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll b/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll
index bf1b86e37ae72..aa8f553351b46 100644
--- a/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll
+++ b/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll
@@ -6,12 +6,12 @@
target triple = "nvptx64-nvidia-cuda"
-declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.global(ptr addrspace(1), ptr addrspace(3), i32, i64, i1)
-declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(3), i32)
+declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(3), i32)
declare void @llvm.nvvm.cp.async.bulk.prefetch.L2(ptr addrspace(1), i32, i64, i1)
-define void @cp_async_bulk_g2s(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr addrspace(7) %dst, i32 %size, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_g2s(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr addrspace(3) %dst, i32 %size, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_g2s(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -49,10 +49,10 @@ define void @cp_async_bulk_g2s(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [%r2], [%rd1], %r3, [%r1], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r2], [%rd1], %r3, [%r1], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 0, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 0, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
@@ -89,7 +89,7 @@ define void @cp_async_bulk_s2g(ptr addrspace(3) %src, ptr addrspace(1) %dst, i32
ret void
}
-define void @cp_async_bulk_cta_to_cluster(ptr addrspace(3) %src, ptr addrspace(3) %bar, ptr addrspace(7) %dst, i32 %size) {
+define void @cp_async_bulk_cta_to_cluster(ptr addrspace(3) %src, ptr addrspace(3) %bar, ptr addrspace(3) %dst, i32 %size) {
; CHECK-PTX64-LABEL: cp_async_bulk_cta_to_cluster(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b32 %r<2>;
@@ -114,7 +114,7 @@ define void @cp_async_bulk_cta_to_cluster(ptr addrspace(3) %src, ptr addrspace(3
; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r4, [cp_async_bulk_cta_to_cluster_param_3];
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.shared::cta.mbarrier::complete_tx::bytes [%r3], [%r1], %r4, [%r2];
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)
+ tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)
ret void
}
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
index f1eae15d6bf18..a9270c6f52344 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
@@ -43,10 +43,7 @@ enum NVVMMemorySpace {
/// Tensor memory space identifier.
/// Tensor memory is available only in arch-accelerated
/// variants from sm100 onwards.
- kTensorMemorySpace = 6,
- /// Distributed shared memory space identifier.
- /// Distributed shared memory is available only in sm90+.
- kSharedClusterMemorySpace = 7,
+ kTensorMemorySpace = 6
};
/// Return the element type and number of elements associated with a wmma matrix
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 5f41d428479b8..0a6e66919f021 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -25,7 +25,6 @@ def LLVM_PointerGeneric : LLVM_PointerInAddressSpace<0>;
def LLVM_PointerGlobal : LLVM_PointerInAddressSpace<1>;
def LLVM_PointerShared : LLVM_PointerInAddressSpace<3>;
def LLVM_PointerTensor : LLVM_PointerInAddressSpace<6>;
-def LLVM_PointerSharedCluster : LLVM_PointerInAddressSpace<7>;
//===----------------------------------------------------------------------===//
// NVVM dialect definitions
@@ -2253,7 +2252,7 @@ def NVVM_CpAsyncBulkGlobalToSharedClusterOp :
}];
let arguments = (ins
- LLVM_PointerSharedCluster:$dstMem,
+ LLVM_PointerShared:$dstMem,
LLVM_PointerGlobal:$srcMem,
LLVM_PointerShared:$mbar,
I32:$size,
@@ -2310,7 +2309,7 @@ def NVVM_CpAsyncBulkSharedCTAToSharedClusterOp :
}];
let arguments = (ins
- LLVM_PointerSharedCluster:$dstMem,
+ LLVM_PointerShared:$dstMem,
LLVM_PointerShared:$srcMem,
LLVM_PointerShared:$mbar,
I32:$size);
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir
index 39b703d9a9677..0e3f98a134491 100644
--- a/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir
@@ -1,25 +1,25 @@
// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
// CHECK-LABEL: @llvm_nvvm_cp_async_bulk_global_to_shared_cluster
-llvm.func @llvm_nvvm_cp_async_bulk_global_to_shared_cluster(%dst : !llvm.ptr<7>, %src : !llvm.ptr<1>, %mbar : !llvm.ptr<3>, %size : i32, %mc : i16, %ch : i64) {
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST:.*]], ptr addrspace(3) %[[MBAR:.*]], ptr addrspace(1) %[[SRC:.*]], i32 %[[SIZE:.*]], i16 0, i64 0, i1 false, i1 false)
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 0, i64 %[[CH:.*]], i1 false, i1 true)
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC:.*]], i64 0, i1 true, i1 false)
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC]], i64 %[[CH]], i1 true, i1 true)
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size : !llvm.ptr<7>, !llvm.ptr<1>
+llvm.func @llvm_nvvm_cp_async_bulk_global_to_shared_cluster(%dst : !llvm.ptr<3>, %src : !llvm.ptr<1>, %mbar : !llvm.ptr<3>, %size : i32, %mc : i16, %ch : i64) {
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST:.*]], ptr addrspace(3) %[[MBAR:.*]], ptr addrspace(1) %[[SRC:.*]], i32 %[[SIZE:.*]], i16 0, i64 0, i1 false, i1 false)
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 0, i64 %[[CH:.*]], i1 false, i1 true)
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC:.*]], i64 0, i1 true, i1 false)
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC]], i64 %[[CH]], i1 true, i1 true)
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size : !llvm.ptr<3>, !llvm.ptr<1>
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size l2_cache_hint = %ch : !llvm.ptr<7>, !llvm.ptr<1>
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size l2_cache_hint = %ch : !llvm.ptr<3>, !llvm.ptr<1>
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc : !llvm.ptr<7>, !llvm.ptr<1>
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc : !llvm.ptr<3>, !llvm.ptr<1>
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc l2_cache_hint = %ch : !llvm.ptr<7>, !llvm.ptr<1>
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc l2_cache_hint = %ch : !llvm.ptr<3>, !llvm.ptr<1>
llvm.return
}
// CHECK-LABEL: @llvm_nvvm_cp_async_bulk_shared_cta_to_shared_cluster
-llvm.func @llvm_nvvm_cp_async_bulk_shared_cta_to_shared_cluster(%dst : !llvm.ptr<7>, %src : !llvm.ptr<3>, %mbar : !llvm.ptr<3>, %size : i32) {
- // CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %0, ptr addrspace(3) %2, ptr addrspace(3) %1, i32 %3)
- nvvm.cp.async.bulk.shared.cluster.shared.cta %dst, %src, %mbar, %size : !llvm.ptr<7>, !llvm.ptr<3>
+llvm.func @llvm_nvvm_cp_async_bulk_shared_cta_to_shared_cluster(%dst : !llvm.ptr<3>, %src : !llvm.ptr<3>, %mbar : !llvm.ptr<3>, %size : i32) {
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %0, ptr addrspace(3) %2, ptr addrspace(3) %1, i32 %3)
+ nvvm.cp.async.bulk.shared.cluster.shared.cta %dst, %src, %mbar, %size : !llvm.ptr<3>, !llvm.ptr<3>
llvm.return
}
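
For reference, a minimal standalone IR sketch of the intrinsic signature exercised by the tests in this patch, assuming the addrspace(3) destination/mbarrier pointers shown in the hunks above; the function name @copy_1k_g2s and the 1024-byte size are illustrative only and not part of the patch:

target triple = "nvptx64-nvidia-cuda"

declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)

define void @copy_1k_g2s(ptr addrspace(1) %src, ptr addrspace(3) %dst, ptr addrspace(3) %mbar) {
  ; Copy 1024 bytes from global to (cluster-)shared memory, completing on %mbar;
  ; multicast mask and L2 cache hint are unused, so both trailing flags are 0.
  tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 1024, i16 0, i64 0, i1 0, i1 0)
  ret void
}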