[clang] [llvm] [mlir] [NVPTX] Add support for Shared Cluster Memory address space. (PR #135444)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Apr 11 19:37:57 PDT 2025
https://github.com/modiking updated https://github.com/llvm/llvm-project/pull/135444
From 5a1c217f05d46198df08a4e8413023b4dd6a62f6 Mon Sep 17 00:00:00 2001
From: Modi Mo <mmo at nvidia.com>
Date: Mon, 31 Mar 2025 22:54:56 -0700
Subject: [PATCH 1/2] initial implementation
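
This change introduces a dedicated address space, addrspace(7), for distributed shared memory (PTX shared::cluster), keeping it distinct from CTA-local shared memory in addrspace(3), and auto-upgrades existing IR that calls the affected NVVM intrinsics with addrspace(3) destination pointers. A minimal usage sketch, assuming the updated intrinsic signatures from this patch (the function and value names below are hypothetical):

declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)
declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(3), i32)

define void @copy_to_peer_cta(ptr addrspace(3) %dst_cta, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size, i32 %cta_rank) {
  ; Map a CTA-local shared pointer to the corresponding shared::cluster address
  ; in the CTA identified by %cta_rank; the result is an addrspace(7) pointer.
  %dst = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %dst_cta, i32 %cta_rank)
  ; Bulk-copy %size bytes from this CTA's shared memory into the peer CTA,
  ; signalling completion on the addrspace(3) mbarrier %bar.
  tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)
  ret void
}

Giving shared::cluster its own address space lets instruction selection emit the shared::cluster forms of atomics and cvta, and lets NVPTX alias analysis keep a MayAlias relationship between addrspace(3) and addrspace(7) instead of the default NoAlias answer for distinct address spaces.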
---
clang/lib/Basic/Targets/NVPTX.cpp | 7 +-
clang/test/CodeGen/target-data.c | 2 +-
clang/test/CodeGenCUDA/builtins-sm90.cu | 2 +-
llvm/docs/NVPTXUsage.rst | 6 +-
llvm/include/llvm/IR/IntrinsicsNVVM.td | 45 +--
llvm/include/llvm/Support/NVPTXAddrSpace.h | 1 +
llvm/lib/IR/AutoUpgrade.cpp | 87 ++++++
.../NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp | 1 +
llvm/lib/Target/NVPTX/NVPTX.h | 1 +
llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp | 5 +
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 11 +-
llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 1 +
llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 10 +-
llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp | 14 +-
llvm/lib/Target/NVPTX/NVPTXUtilities.h | 2 +
.../Assembler/auto_upgrade_nvvm_intrinsics.ll | 57 ++++
.../CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll | 96 +++----
llvm/test/CodeGen/NVPTX/cp-async-bulk.ll | 18 +-
.../NVPTX/distributed-shared-cluster.ll | 258 ++++++++++++++++++
llvm/test/CodeGen/NVPTX/nvptx-aa.ll | 12 +-
.../include/mlir/Dialect/LLVMIR/NVVMDialect.h | 5 +-
mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td | 5 +-
.../Target/LLVMIR/nvvm/tma_bulk_copy.mlir | 24 +-
23 files changed, 558 insertions(+), 112 deletions(-)
create mode 100644 llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
diff --git a/clang/lib/Basic/Targets/NVPTX.cpp b/clang/lib/Basic/Targets/NVPTX.cpp
index 5931a77a85fec..08c8460045c6a 100644
--- a/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/clang/lib/Basic/Targets/NVPTX.cpp
@@ -71,10 +71,11 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
if (TargetPointerWidth == 32)
resetDataLayout(
- "e-p:32:32-p6:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
+ "e-p:32:32-p6:32:32-p7:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
else if (Opts.NVPTXUseShortPointers)
- resetDataLayout("e-p3:32:32-p4:32:32-p5:32:32-p6:32:32-i64:64-i128:128-v16:"
- "16-v32:32-n16:32:64");
+ resetDataLayout(
+ "e-p3:32:32-p4:32:32-p5:32:32-p6:32:32-p7:32:32-i64:64-i128:128-v16:"
+ "16-v32:32-n16:32:64");
else
resetDataLayout("e-p6:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
diff --git a/clang/test/CodeGen/target-data.c b/clang/test/CodeGen/target-data.c
index fe29aadb1dd53..9cb00e8ee73d3 100644
--- a/clang/test/CodeGen/target-data.c
+++ b/clang/test/CodeGen/target-data.c
@@ -160,7 +160,7 @@
// RUN: %clang_cc1 -triple nvptx-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=NVPTX
-// NVPTX: target datalayout = "e-p:32:32-p6:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64"
+// NVPTX: target datalayout = "e-p:32:32-p6:32:32-p7:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64"
// RUN: %clang_cc1 -triple nvptx64-unknown -o - -emit-llvm %s | \
// RUN: FileCheck %s -check-prefix=NVPTX64
diff --git a/clang/test/CodeGenCUDA/builtins-sm90.cu b/clang/test/CodeGenCUDA/builtins-sm90.cu
index a639c7716adb1..f4746df944536 100644
--- a/clang/test/CodeGenCUDA/builtins-sm90.cu
+++ b/clang/test/CodeGenCUDA/builtins-sm90.cu
@@ -50,7 +50,7 @@ __attribute__((global)) void kernel(long *out, void *ptr, unsigned u) {
auto * sptr = (__attribute__((address_space(3))) void *)ptr;
// CHECK: call ptr @llvm.nvvm.mapa(ptr %{{.*}}, i32 %{{.*}})
out[i++] = (long) __nvvm_mapa(ptr, u);
- // CHECK: call ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
+ // CHECK: call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
out[i++] = (long) __nvvm_mapa_shared_cluster(sptr, u);
// CHECK: call i32 @llvm.nvvm.getctarank(ptr {{.*}})
out[i++] = __nvvm_getctarank(ptr);
diff --git a/llvm/docs/NVPTXUsage.rst b/llvm/docs/NVPTXUsage.rst
index 621879fc5648b..2ce9a4540034c 100644
--- a/llvm/docs/NVPTXUsage.rst
+++ b/llvm/docs/NVPTXUsage.rst
@@ -499,7 +499,7 @@ Syntax:
.. code-block:: llvm
- declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
+ declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
Overview:
"""""""""
@@ -563,7 +563,7 @@ Syntax:
.. code-block:: llvm
- declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %mbar, ptr addrspace(3) %src, i32 %size)
+ declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(3) %src, i32 %size)
Overview:
"""""""""
@@ -718,7 +718,7 @@ Syntax:
.. code-block:: llvm
- declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
+ declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr %tensor_map, i32 %d0, i16 %mc, i64 %ch, i1 %flag_mc, i1 %flag_ch)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(..., i32 %d0, i32 %d1, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(..., i32 %d0, i32 %d1, i32 %d2, ...)
declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(..., i32 %d0, i32 %d1, i32 %d2, i32 %d3, ...)
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 4aeb1d8a2779e..f053fa6e2bf22 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -131,6 +131,7 @@ def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
def llvm_local_ptr_ty : LLVMQualPointerType<5>; // (local)ptr
def llvm_tmem_ptr_ty : LLVMQualPointerType<6>; // (tensor memory)ptr
+def llvm_dshared_ptr_ty : LLVMQualPointerType<7>; // (dshared)ptr
//
// MISC
@@ -691,15 +692,15 @@ class CP_ASYNC_BULK_TENSOR_G2S_INTR<int dim, string mode> {
list<LLVMType> Im2ColOffsetsTy = !listsplat(llvm_i16_ty, NumIm2ColOffsets);
list<LLVMType> TensorDimsTy = !listsplat(llvm_i32_ty, dim);
list<LLVMType> ArgsTy = !listconcat(
- [llvm_shared_ptr_ty, // dst_smem_ptr
- llvm_shared_ptr_ty, // mbarrier_smem_ptr
- llvm_ptr_ty], // tensormap_ptr
- TensorDimsTy, // actual tensor dims
- Im2ColOffsetsTy, // im2col offsets
- [llvm_i16_ty, // cta_mask
- llvm_i64_ty, // cache_hint
- llvm_i1_ty, // Flag for cta_mask
- llvm_i1_ty] // Flag for cache_hint
+ [llvm_dshared_ptr_ty, // dst_smem_ptr
+ llvm_shared_ptr_ty, // mbarrier_smem_ptr
+ llvm_ptr_ty], // tensormap_ptr
+ TensorDimsTy, // actual tensor dims
+ Im2ColOffsetsTy, // im2col offsets
+ [llvm_i16_ty, // cta_mask
+ llvm_i64_ty, // cache_hint
+ llvm_i1_ty, // Flag for cta_mask
+ llvm_i1_ty] // Flag for cache_hint
);
int TempFlagsStartIdx = !add(dim, 5);
@@ -5087,7 +5088,7 @@ def int_nvvm_mapa
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa">;
def int_nvvm_mapa_shared_cluster
- : DefaultAttrsIntrinsic<[llvm_shared_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
+ : DefaultAttrsIntrinsic<[llvm_dshared_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa.shared.cluster">;
def int_nvvm_getctarank
@@ -5187,14 +5188,14 @@ def int_nvvm_discard_L2 : DefaultAttrsIntrinsic<[],
// From Global to Shared Cluster
def int_nvvm_cp_async_bulk_global_to_shared_cluster
: DefaultAttrsIntrinsic<[],
- [llvm_shared_ptr_ty, // dst_smem_ptr
- llvm_shared_ptr_ty, // mbarrier_ptr
- llvm_global_ptr_ty, // src_gmem_ptr
- llvm_i32_ty, // copy_size
- llvm_i16_ty, // cta_mask
- llvm_i64_ty, // cache_hint
- llvm_i1_ty, // Flag for cta_mask
- llvm_i1_ty], // Flag for cache_hint
+ [llvm_dshared_ptr_ty, // dst_dsmem_ptr
+ llvm_shared_ptr_ty, // mbarrier_ptr
+ llvm_global_ptr_ty, // src_gmem_ptr
+ llvm_i32_ty, // copy_size
+ llvm_i16_ty, // cta_mask
+ llvm_i64_ty, // cache_hint
+ llvm_i1_ty, // Flag for cta_mask
+ llvm_i1_ty], // Flag for cache_hint
[IntrConvergent, IntrArgMemOnly,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<2>>,
NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
@@ -5204,10 +5205,10 @@ def int_nvvm_cp_async_bulk_global_to_shared_cluster
// From Shared CTA to Shared Cluster
def int_nvvm_cp_async_bulk_shared_cta_to_cluster
: DefaultAttrsIntrinsic<[],
- [llvm_shared_ptr_ty, // dst_smem_ptr
- llvm_shared_ptr_ty, // mbarrier_ptr
- llvm_shared_ptr_ty, // src_smem_ptr
- llvm_i32_ty], // copy_size
+ [llvm_dshared_ptr_ty, // dst_dsmem_ptr
+ llvm_shared_ptr_ty, // mbarrier_ptr
+ llvm_shared_ptr_ty, // src_smem_ptr
+ llvm_i32_ty], // copy_size
[IntrConvergent, IntrArgMemOnly,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<2>>,
NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
diff --git a/llvm/include/llvm/Support/NVPTXAddrSpace.h b/llvm/include/llvm/Support/NVPTXAddrSpace.h
index 486a396621da1..a3eac31f2e5e9 100644
--- a/llvm/include/llvm/Support/NVPTXAddrSpace.h
+++ b/llvm/include/llvm/Support/NVPTXAddrSpace.h
@@ -25,6 +25,7 @@ enum AddressSpace : unsigned {
ADDRESS_SPACE_CONST = 4,
ADDRESS_SPACE_LOCAL = 5,
ADDRESS_SPACE_TENSOR = 6,
+ ADDRESS_SPACE_DSHARED = 7,
ADDRESS_SPACE_PARAM = 101,
};
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 0b329d91c3c7c..7482014d3c168 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -46,6 +46,7 @@
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/NVPTXAddrSpace.h"
#include "llvm/Support/Regex.h"
#include "llvm/TargetParser/Triple.h"
#include <cstdint>
@@ -938,6 +939,47 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
return false; // No other 'arm.*', 'aarch64.*'.
}
+static Intrinsic::ID shouldUpgradeNVPTXDSharedIntrinsic(Function *F,
+ StringRef Name) {
+ if (Name.consume_front("mapa.shared.cluster"))
+ if (F->getReturnType()->getPointerAddressSpace() ==
+ NVPTXAS::ADDRESS_SPACE_SHARED)
+ return Intrinsic::nvvm_mapa_shared_cluster;
+
+ if (Name.consume_front("cp.async.bulk.")) {
+ Intrinsic::ID ID =
+ StringSwitch<Intrinsic::ID>(Name)
+ .Case("global.to.shared.cluster",
+ Intrinsic::nvvm_cp_async_bulk_global_to_shared_cluster)
+ .Case("shared.cta.to.cluster",
+ Intrinsic::nvvm_cp_async_bulk_shared_cta_to_cluster)
+ .Case("tensor.g2s.im2col.3d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_3d)
+ .Case("tensor.g2s.im2col.4d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_4d)
+ .Case("tensor.g2s.im2col.5d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_5d)
+ .Case("tensor.g2s.tile.1d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_1d)
+ .Case("tensor.g2s.tile.2d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_2d)
+ .Case("tensor.g2s.tile.3d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_3d)
+ .Case("tensor.g2s.tile.4d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_4d)
+ .Case("tensor.g2s.tile.5d",
+ Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_5d)
+ .Default(Intrinsic::not_intrinsic);
+
+ if (ID != Intrinsic::not_intrinsic)
+ if (F->getArg(0)->getType()->getPointerAddressSpace() ==
+ NVPTXAS::ADDRESS_SPACE_SHARED)
+ return ID;
+ }
+
+ return Intrinsic::not_intrinsic;
+}
+
static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {
if (Name.consume_front("abs."))
return StringSwitch<Intrinsic::ID>(Name)
@@ -1284,6 +1326,14 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
}
}
+ // Upgrade Distributed Shared Memory Intrinsics
+ Intrinsic::ID IID = shouldUpgradeNVPTXDSharedIntrinsic(F, Name);
+ if (IID != Intrinsic::not_intrinsic) {
+ rename(F);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID);
+ return true;
+ }
+
// The following nvvm intrinsics correspond exactly to an LLVM idiom, but
// not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
//
@@ -4704,6 +4754,43 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
CI->eraseFromParent();
return;
}
+ case Intrinsic::nvvm_mapa_shared_cluster: {
+ // Create a new call with the correct address space.
+ NewCall =
+ Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1)});
+ Value *Res = NewCall;
+ Res = Builder.CreateAddrSpaceCast(
+ Res, Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_GENERIC));
+ Res = Builder.CreateAddrSpaceCast(
+ Res, Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_SHARED));
+ NewCall->takeName(CI);
+ CI->replaceAllUsesWith(Res);
+ CI->eraseFromParent();
+ return;
+ }
+ case Intrinsic::nvvm_cp_async_bulk_global_to_shared_cluster:
+ case Intrinsic::nvvm_cp_async_bulk_shared_cta_to_cluster:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_3d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_4d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_5d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_1d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_2d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_3d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_4d:
+ case Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_5d: {
+
+ SmallVector<Value *, 4> Args(CI->args());
+ Args[0] = Builder.CreateAddrSpaceCast(
+ Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_GENERIC));
+ Args[0] = Builder.CreateAddrSpaceCast(
+ Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_DSHARED));
+
+ NewCall = Builder.CreateCall(NewFn, Args);
+ NewCall->takeName(CI);
+ CI->replaceAllUsesWith(NewCall);
+ CI->eraseFromParent();
+ return;
+ }
case Intrinsic::riscv_sha256sig0:
case Intrinsic::riscv_sha256sig1:
case Intrinsic::riscv_sha256sum0:
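
For reference, a hedged sketch of what the cp.async.bulk upgrade path above produces (operand names are illustrative): the old addrspace(3) destination operand is routed through the generic address space into addrspace(7) before calling the new declaration, while the remaining operands are passed through unchanged.

  ; before the upgrade (old intrinsic signature):
  ;   call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)
  ; after the upgrade:
  ;   %g = addrspacecast ptr addrspace(3) %dst to ptr
  ;   %d = addrspacecast ptr %g to ptr addrspace(7)
  ;   call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)

For llvm.nvvm.mapa.shared.cluster, the new addrspace(7) result is instead cast back through generic to addrspace(3), so existing users of the old return type remain valid.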
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
index e42e738b9973f..9ab59c1c144f3 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
@@ -288,6 +288,7 @@ void NVPTXInstPrinter::printLdStCode(const MCInst *MI, int OpNum,
case NVPTX::AddressSpace::Global:
case NVPTX::AddressSpace::Const:
case NVPTX::AddressSpace::Shared:
+ case NVPTX::AddressSpace::Dshared:
case NVPTX::AddressSpace::Param:
case NVPTX::AddressSpace::Local:
O << "." << A;
diff --git a/llvm/lib/Target/NVPTX/NVPTX.h b/llvm/lib/Target/NVPTX/NVPTX.h
index 98e77ca80b8d5..c20c522f36bd3 100644
--- a/llvm/lib/Target/NVPTX/NVPTX.h
+++ b/llvm/lib/Target/NVPTX/NVPTX.h
@@ -176,6 +176,7 @@ enum AddressSpace : AddressSpaceUnderlyingType {
Shared = 3,
Const = 4,
Local = 5,
+ Dshared = 7,
// NVPTX Backend Private:
Param = 101
diff --git a/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp b/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
index b910ccab21bf3..60bc22f5f589c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
@@ -86,6 +86,11 @@ static AliasResult::Kind getAliasResult(unsigned AS1, unsigned AS2) {
// TODO: cvta.param is not yet supported. We need to change aliasing
// rules once it is added.
+ // Distributed shared memory aliases with shared memory.
+ if (((AS1 == ADDRESS_SPACE_SHARED) && (AS2 == ADDRESS_SPACE_DSHARED)) ||
+ ((AS1 == ADDRESS_SPACE_DSHARED) && (AS2 == ADDRESS_SPACE_SHARED)))
+ return AliasResult::MayAlias;
+
return (AS1 == AS2 ? AliasResult::MayAlias : AliasResult::NoAlias);
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index ec1f969494cd1..34ddfd3c411a8 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -513,6 +513,8 @@ static std::optional<unsigned> convertAS(unsigned AS) {
return NVPTX::AddressSpace::Global;
case llvm::ADDRESS_SPACE_SHARED:
return NVPTX::AddressSpace::Shared;
+ case llvm::ADDRESS_SPACE_DSHARED:
+ return NVPTX::AddressSpace::Dshared;
case llvm::ADDRESS_SPACE_GENERIC:
return NVPTX::AddressSpace::Generic;
case llvm::ADDRESS_SPACE_PARAM:
@@ -658,7 +660,8 @@ getOperationOrderings(MemSDNode *N, const NVPTXSubtarget *Subtarget) {
bool AddrGenericOrGlobalOrShared =
(CodeAddrSpace == NVPTX::AddressSpace::Generic ||
CodeAddrSpace == NVPTX::AddressSpace::Global ||
- CodeAddrSpace == NVPTX::AddressSpace::Shared);
+ CodeAddrSpace == NVPTX::AddressSpace::Shared ||
+ CodeAddrSpace == NVPTX::AddressSpace::Dshared);
if (!AddrGenericOrGlobalOrShared)
return NVPTX::Ordering::NotAtomic;
@@ -979,6 +982,9 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
case ADDRESS_SPACE_SHARED:
Opc = TM.is64Bit() ? NVPTX::cvta_shared_64 : NVPTX::cvta_shared;
break;
+ case ADDRESS_SPACE_DSHARED:
+ Opc = TM.is64Bit() ? NVPTX::cvta_dshared_64 : NVPTX::cvta_dshared;
+ break;
case ADDRESS_SPACE_CONST:
Opc = TM.is64Bit() ? NVPTX::cvta_const_64 : NVPTX::cvta_const;
break;
@@ -1001,6 +1007,9 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
case ADDRESS_SPACE_SHARED:
Opc = TM.is64Bit() ? NVPTX::cvta_to_shared_64 : NVPTX::cvta_to_shared;
break;
+ case ADDRESS_SPACE_DSHARED:
+ Opc = TM.is64Bit() ? NVPTX::cvta_to_dshared_64 : NVPTX::cvta_to_dshared;
+ break;
case ADDRESS_SPACE_CONST:
Opc = TM.is64Bit() ? NVPTX::cvta_to_const_64 : NVPTX::cvta_to_const;
break;
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 16b489afddf5c..4cf5292983048 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -137,6 +137,7 @@ def hasAtomBitwise64 : Predicate<"Subtarget->hasAtomBitwise64()">;
def hasAtomMinMax64 : Predicate<"Subtarget->hasAtomMinMax64()">;
def hasVote : Predicate<"Subtarget->hasVote()">;
def hasDouble : Predicate<"Subtarget->hasDouble()">;
+def hasClusters : Predicate<"Subtarget->hasClusters()">;
def hasLDG : Predicate<"Subtarget->hasLDG()">;
def hasLDU : Predicate<"Subtarget->hasLDU()">;
def hasPTXASUnreachableBug : Predicate<"Subtarget->hasPTXASUnreachableBug()">;
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 8528ff702f236..19b370e4ce6f9 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -33,6 +33,9 @@ def AS_match {
code shared = [{
return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_SHARED);
}];
+ code dshared = [{
+ return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_DSHARED);
+ }];
code global = [{
return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_GLOBAL);
}];
@@ -1979,10 +1982,11 @@ class ATOMIC_GLOBAL_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.global>;
class ATOMIC_SHARED_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.shared>;
+class ATOMIC_DSHARED_CHK <dag frag>
+ : PatFrag<!setdagop(frag, ops), frag, AS_match.dshared>;
class ATOMIC_GENERIC_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.generic>;
-
multiclass F_ATOMIC_2<RegTyInfo t, string sem_str, string as_str, string op_str,
SDPatternOperator op, list<Predicate> preds> {
defvar asm_str = "atom" # sem_str # as_str # "." # op_str # " \t$dst, [$addr], $b;";
@@ -2034,6 +2038,7 @@ multiclass F_ATOMIC_2_AS<RegTyInfo t, SDPatternOperator frag, string op_str, lis
defvar frag_pat = (frag node:$a, node:$b);
defm _G : F_ATOMIC_2<t, "", ".global", op_str, ATOMIC_GLOBAL_CHK<frag_pat>, preds>;
defm _S : F_ATOMIC_2<t, "", ".shared", op_str, ATOMIC_SHARED_CHK<frag_pat>, preds>;
+ defm _DS : F_ATOMIC_2<t, "", ".shared::cluster", op_str, ATOMIC_DSHARED_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
defm _GEN : F_ATOMIC_2<t, "", "", op_str, ATOMIC_GENERIC_CHK<frag_pat>, preds>;
}
@@ -2041,6 +2046,7 @@ multiclass F_ATOMIC_3_AS<RegTyInfo t, SDPatternOperator frag, string sem_str, st
defvar frag_pat = (frag node:$a, node:$b, node:$c);
defm _G : F_ATOMIC_3<t, sem_str, ".global", op_str, ATOMIC_GLOBAL_CHK<frag_pat>, preds>;
defm _S : F_ATOMIC_3<t, sem_str, ".shared", op_str, ATOMIC_SHARED_CHK<frag_pat>, preds>;
+ defm _DS : F_ATOMIC_3<t, sem_str, ".shared::cluster", op_str, ATOMIC_DSHARED_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
defm _GEN : F_ATOMIC_3<t, sem_str, "", op_str, ATOMIC_GENERIC_CHK<frag_pat>, preds>;
}
@@ -2337,11 +2343,13 @@ multiclass G_TO_NG<string Str> {
defm cvta_local : NG_TO_G<"local">;
defm cvta_shared : NG_TO_G<"shared">;
+defm cvta_dshared : NG_TO_G<"shared::cluster">;
defm cvta_global : NG_TO_G<"global">;
defm cvta_const : NG_TO_G<"const">;
defm cvta_to_local : G_TO_NG<"local">;
defm cvta_to_shared : G_TO_NG<"shared">;
+defm cvta_to_dshared : G_TO_NG<"shared::cluster">;
defm cvta_to_global : G_TO_NG<"global">;
defm cvta_to_const : G_TO_NG<"const">;
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index a4c3b43aec9f2..88896c8f407ad 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -117,13 +117,15 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeNVPTXTarget() {
static std::string computeDataLayout(bool is64Bit, bool UseShortPointers) {
std::string Ret = "e";
- if (!is64Bit)
- Ret += "-p:32:32";
- else if (UseShortPointers)
- Ret += "-p3:32:32-p4:32:32-p5:32:32";
-
// Tensor Memory (addrspace:6) is always 32-bits.
- Ret += "-p6:32:32";
+ // Distributed Shared Memory (addrspace:7) follows shared memory
+ // (addrspace:3).
+ if (!is64Bit)
+ Ret += "-p:32:32-p6:32:32-p7:32:32";
+ else if (UseShortPointers) {
+ Ret += "-p3:32:32-p4:32:32-p5:32:32-p6:32:32-p7:32:32";
+ } else
+ Ret += "-p6:32:32";
Ret += "-i64:64-i128:128-v16:16-v32:32-n16:32:64";
diff --git a/llvm/lib/Target/NVPTX/NVPTXUtilities.h b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
index 9283b398a9c14..931ae9e78d381 100644
--- a/llvm/lib/Target/NVPTX/NVPTXUtilities.h
+++ b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
@@ -161,6 +161,8 @@ inline std::string AddressSpaceToString(AddressSpace A) {
return "const";
case AddressSpace::Shared:
return "shared";
+ case AddressSpace::Dshared:
+ return "shared::cluster";
case AddressSpace::Param:
return "param";
case AddressSpace::Local:
diff --git a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
index 74b9640df6977..614fea7635653 100644
--- a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
+++ b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
@@ -55,6 +55,19 @@ declare float @llvm.nvvm.ldg.global.f.f32.p0(ptr, i32)
declare i32 @llvm.nvvm.atomic.load.inc.32(ptr, i32)
declare i32 @llvm.nvvm.atomic.load.dec.32(ptr, i32)
+declare ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i64)
+
+declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(3), i32)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+
; CHECK-LABEL: @simple_upgrade
define void @simple_upgrade(i32 %a, i64 %b, i16 %c) {
; CHECK: call i32 @llvm.bitreverse.i32(i32 %a)
@@ -239,3 +252,47 @@ define i32 @atomics(ptr %p0, i32 %a) {
ret i32 %r2
}
+; CHECK-LABEL: @nvvm_shared_cluster_intrinsics
+define void @nvvm_shared_cluster_intrinsics(ptr addrspace(3) %p0, i64 %offset) {
+; CHECK: %[[ASC1:.*]] = addrspacecast ptr addrspace(3) %p0 to ptr addrspace(7)
+; CHECK: %[[CALL:.*]] = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(7) %[[ASC1]], i64 %offset)
+; CHECK: %[[ASC2:.*]] = addrspacecast ptr addrspace(7) %[[CALL]] to ptr addrspace(3)
+ %r = call ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p0, i64 %offset)
+ ret void
+}
+
+; CHECK-LABEL: @nvvm_cp_async_bulk_intrinsics
+define void @nvvm_cp_async_bulk_intrinsics(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, ptr addrspace(3) %src_shared, i32 %size) {
+; CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
+ call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
+ ret void
+}
+
+; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_im2col
+define void @nvvm_cp_async_bulk_tensor_g2s_im2col(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src) {
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i16 0, i64 0, i1 false, i1 false)
+ ret void
+}
+
+; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_tile
+define void @nvvm_cp_async_bulk_tensor_g2s_tile(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src) {
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i16 0, i64 0, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i16 0, i64 0, i1 false, i1 false)
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll b/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll
index 521dd567f837e..530a896642b89 100644
--- a/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll
+++ b/llvm/test/CodeGen/NVPTX/cp-async-bulk-tensor-g2s.ll
@@ -6,18 +6,18 @@
target triple = "nvptx64-nvidia-cuda"
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_1d
-define void @cp_async_bulk_tensor_g2s_tile_1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_1d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -55,18 +55,18 @@ define void @cp_async_bulk_tensor_g2s_tile_1d(ptr addrspace(3) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 undef, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 undef, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_2d
-define void @cp_async_bulk_tensor_g2s_tile_2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_2d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -106,18 +106,18 @@ define void @cp_async_bulk_tensor_g2s_tile_2d(ptr addrspace(3) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 undef, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 undef, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_3d
-define void @cp_async_bulk_tensor_g2s_tile_3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_3d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -159,18 +159,18 @@ define void @cp_async_bulk_tensor_g2s_tile_3d(ptr addrspace(3) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_4d
-define void @cp_async_bulk_tensor_g2s_tile_4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_4d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -214,18 +214,18 @@ define void @cp_async_bulk_tensor_g2s_tile_4d(ptr addrspace(3) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_tile_5d
-define void @cp_async_bulk_tensor_g2s_tile_5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_tile_5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_tile_5d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -271,18 +271,18 @@ define void @cp_async_bulk_tensor_g2s_tile_5d(ptr addrspace(3) %d, ptr addrspace
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.tile.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_im2col_3d
-define void @cp_async_bulk_tensor_g2s_im2col_3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_im2col_3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_im2col_3d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<3>;
@@ -326,18 +326,18 @@ define void @cp_async_bulk_tensor_g2s_im2col_3d(ptr addrspace(3) %d, ptr addrspa
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], {%rs1}, %rs2;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.3d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5}], [%r2], {%rs1}, %rs2, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_im2col_4d
-define void @cp_async_bulk_tensor_g2s_im2col_4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_im2col_4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_im2col_4d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<4>;
@@ -385,18 +385,18 @@ define void @cp_async_bulk_tensor_g2s_im2col_4d(ptr addrspace(3) %d, ptr addrspa
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], {%rs1, %rs2}, %rs3;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.4d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6}], [%r2], {%rs1, %rs2}, %rs3, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
; CHECK-LABEL: cp_async_bulk_tensor_g2s_im2col_5d
-define void @cp_async_bulk_tensor_g2s_im2col_5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_tensor_g2s_im2col_5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_tensor_g2s_im2col_5d(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<5>;
@@ -448,12 +448,12 @@ define void @cp_async_bulk_tensor_g2s_im2col_5d(ptr addrspace(3) %d, ptr addrspa
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], {%rs1, %rs2, %rs3}, %rs4;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.tensor.5d.shared::cluster.global.im2col.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r1], [%rd1, {%r3, %r4, %r5, %r6, %r7}], [%r2], {%rs1, %rs2, %rs3}, %rs4, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll b/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll
index aa8f553351b46..bf1b86e37ae72 100644
--- a/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll
+++ b/llvm/test/CodeGen/NVPTX/cp-async-bulk.ll
@@ -6,12 +6,12 @@
target triple = "nvptx64-nvidia-cuda"
-declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
+declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.global(ptr addrspace(1), ptr addrspace(3), i32, i64, i1)
-declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(3), i32)
+declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(3), i32)
declare void @llvm.nvvm.cp.async.bulk.prefetch.L2(ptr addrspace(1), i32, i64, i1)
-define void @cp_async_bulk_g2s(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr addrspace(3) %dst, i32 %size, i16 %mc, i64 %ch) {
+define void @cp_async_bulk_g2s(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr addrspace(7) %dst, i32 %size, i16 %mc, i64 %ch) {
; CHECK-PTX64-LABEL: cp_async_bulk_g2s(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b16 %rs<2>;
@@ -49,10 +49,10 @@ define void @cp_async_bulk_g2s(ptr addrspace(1) %src, ptr addrspace(3) %bar, ptr
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster [%r2], [%rd1], %r3, [%r1], %rs1;
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes.multicast::cluster.L2::cache_hint [%r2], [%rd1], %r3, [%r1], %rs1, %rd2;
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 0, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 %ch, i1 0, i1 1)
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 0, i1 1, i1 0)
- tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 1, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 0, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 %ch, i1 0, i1 1)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 0, i1 1, i1 0)
+ tail call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 %mc, i64 %ch, i1 1, i1 1)
ret void
}
@@ -89,7 +89,7 @@ define void @cp_async_bulk_s2g(ptr addrspace(3) %src, ptr addrspace(1) %dst, i32
ret void
}
-define void @cp_async_bulk_cta_to_cluster(ptr addrspace(3) %src, ptr addrspace(3) %bar, ptr addrspace(3) %dst, i32 %size) {
+define void @cp_async_bulk_cta_to_cluster(ptr addrspace(3) %src, ptr addrspace(3) %bar, ptr addrspace(7) %dst, i32 %size) {
; CHECK-PTX64-LABEL: cp_async_bulk_cta_to_cluster(
; CHECK-PTX64: {
; CHECK-PTX64-NEXT: .reg .b32 %r<2>;
@@ -114,7 +114,7 @@ define void @cp_async_bulk_cta_to_cluster(ptr addrspace(3) %src, ptr addrspace(3
; CHECK-PTX-SHARED32-NEXT: ld.param.u32 %r4, [cp_async_bulk_cta_to_cluster_param_3];
; CHECK-PTX-SHARED32-NEXT: cp.async.bulk.shared::cluster.shared::cta.mbarrier::complete_tx::bytes [%r3], [%r1], %r4, [%r2];
; CHECK-PTX-SHARED32-NEXT: ret;
- tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)
+ tail call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src, i32 %size)
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
new file mode 100644
index 0000000000000..f499500a64ebf
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
@@ -0,0 +1,258 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -o - -mcpu=sm_90 -march=nvptx64 -mattr=+ptx80 | FileCheck %s
+; RUN: %if ptxas-12.0 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80| %ptxas-verify -arch=sm_90 %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+ at llvm.used = appending global [1 x ptr] [ptr @test_distributed_shared_cluster], section "llvm.metadata"
+
+declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)
+declare i1 @llvm.nvvm.isspacep.shared.cluster(ptr)
+declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
+declare ptr @llvm.nvvm.mapa(ptr, i32)
+
+define i32 @test_distributed_shared_cluster(ptr %ptr, ptr addrspace(3) %smem_ptr) local_unnamed_addr {
+; CHECK-LABEL: test_distributed_shared_cluster(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<13>;
+; CHECK-NEXT: .reg .b16 %rs<5>;
+; CHECK-NEXT: .reg .b32 %r<69>;
+; CHECK-NEXT: .reg .f32 %f<2>;
+; CHECK-NEXT: .reg .b64 %rd<24>;
+; CHECK-NEXT: .reg .f64 %fd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.u64 %rd2, [test_distributed_shared_cluster_param_0];
+; CHECK-NEXT: ld.param.u64 %rd3, [test_distributed_shared_cluster_param_1];
+; CHECK-NEXT: mov.u32 %r24, %ctaid.x;
+; CHECK-NEXT: xor.b32 %r25, %r24, 1;
+; CHECK-NEXT: isspacep.shared::cluster %p1, %rd2;
+; CHECK-NEXT: mapa.u64 %rd4, %rd2, %r25;
+; CHECK-NEXT: isspacep.shared::cluster %p2, %rd4;
+; CHECK-NEXT: mapa.shared::cluster.u64 %rd5, %rd3, %r25;
+; CHECK-NEXT: mov.b16 %rs1, 0x3C00;
+; CHECK-NEXT: atom.shared::cluster.add.noftz.f16 %rs2, [%rd5], %rs1;
+; CHECK-NEXT: mov.b16 %rs3, 0x3F80;
+; CHECK-NEXT: atom.shared::cluster.add.noftz.bf16 %rs4, [%rd5], %rs3;
+; CHECK-NEXT: atom.shared::cluster.add.f32 %f1, [%rd5], 0f3F800000;
+; CHECK-NEXT: atom.shared::cluster.add.f64 %fd1, [%rd5], 0d3FF0000000000000;
+; CHECK-NEXT: atom.shared::cluster.add.u32 %r26, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.add.u64 %rd6, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.exch.b32 %r27, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.exch.b64 %rd7, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.min.s32 %r28, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.min.s64 %rd8, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.min.u32 %r29, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.min.u64 %rd9, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.max.s32 %r30, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.max.s64 %rd10, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.max.u32 %r31, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.max.u64 %rd11, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.inc.u32 %r32, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.dec.u32 %r33, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.and.b32 %r34, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.and.b64 %rd12, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.or.b32 %r35, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.or.b64 %rd13, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.xor.b32 %r36, [%rd5], 1;
+; CHECK-NEXT: atom.shared::cluster.xor.b64 %rd14, [%rd5], 1;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r37, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r38, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r39, [%rd5], 1, 0;
+; CHECK-NEXT: atom.release.shared::cluster.cas.b32 %r40, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b32 %r41, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b32 %r42, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r43, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r44, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b32 %r45, [%rd5], 1, 0;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b64 %rd15, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd16, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd17, [%rd5], 1, 0;
+; CHECK-NEXT: atom.release.shared::cluster.cas.b64 %rd18, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b64 %rd19, [%rd5], 1, 0;
+; CHECK-NEXT: atom.acq_rel.shared::cluster.cas.b64 %rd20, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd21, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd22, [%rd5], 1, 0;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: atom.acquire.shared::cluster.cas.b64 %rd23, [%rd5], 1, 0;
+; CHECK-NEXT: and.b64 %rd1, %rd5, -4;
+; CHECK-NEXT: cvt.u32.u64 %r46, %rd5;
+; CHECK-NEXT: and.b32 %r47, %r46, 3;
+; CHECK-NEXT: shl.b32 %r1, %r47, 3;
+; CHECK-NEXT: mov.b32 %r48, 65535;
+; CHECK-NEXT: shl.b32 %r49, %r48, %r1;
+; CHECK-NEXT: not.b32 %r2, %r49;
+; CHECK-NEXT: mov.b32 %r50, 1;
+; CHECK-NEXT: shl.b32 %r3, %r50, %r1;
+; CHECK-NEXT: ld.shared::cluster.u32 %r51, [%rd1];
+; CHECK-NEXT: and.b32 %r64, %r51, %r2;
+; CHECK-NEXT: $L__BB0_1: // %partword.cmpxchg.loop33
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or.b32 %r52, %r64, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r6, [%rd1], %r52, %r64;
+; CHECK-NEXT: setp.eq.s32 %p3, %r6, %r52;
+; CHECK-NEXT: @%p3 bra $L__BB0_3;
+; CHECK-NEXT: // %bb.2: // %partword.cmpxchg.failure32
+; CHECK-NEXT: // in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT: and.b32 %r7, %r6, %r2;
+; CHECK-NEXT: setp.ne.s32 %p4, %r64, %r7;
+; CHECK-NEXT: mov.b32 %r64, %r7;
+; CHECK-NEXT: @%p4 bra $L__BB0_1;
+; CHECK-NEXT: $L__BB0_3: // %partword.cmpxchg.end31
+; CHECK-NEXT: ld.shared::cluster.u32 %r53, [%rd1];
+; CHECK-NEXT: and.b32 %r65, %r53, %r2;
+; CHECK-NEXT: $L__BB0_4: // %partword.cmpxchg.loop23
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or.b32 %r54, %r65, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r10, [%rd1], %r54, %r65;
+; CHECK-NEXT: setp.eq.s32 %p5, %r10, %r54;
+; CHECK-NEXT: @%p5 bra $L__BB0_6;
+; CHECK-NEXT: // %bb.5: // %partword.cmpxchg.failure22
+; CHECK-NEXT: // in Loop: Header=BB0_4 Depth=1
+; CHECK-NEXT: and.b32 %r11, %r10, %r2;
+; CHECK-NEXT: setp.ne.s32 %p6, %r65, %r11;
+; CHECK-NEXT: mov.b32 %r65, %r11;
+; CHECK-NEXT: @%p6 bra $L__BB0_4;
+; CHECK-NEXT: $L__BB0_6: // %partword.cmpxchg.end21
+; CHECK-NEXT: fence.acq_rel.sys;
+; CHECK-NEXT: fence.acq_rel.sys;
+; CHECK-NEXT: ld.shared::cluster.u32 %r55, [%rd1];
+; CHECK-NEXT: and.b32 %r66, %r55, %r2;
+; CHECK-NEXT: $L__BB0_7: // %partword.cmpxchg.loop13
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or.b32 %r56, %r66, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r14, [%rd1], %r56, %r66;
+; CHECK-NEXT: setp.eq.s32 %p7, %r14, %r56;
+; CHECK-NEXT: @%p7 bra $L__BB0_9;
+; CHECK-NEXT: // %bb.8: // %partword.cmpxchg.failure12
+; CHECK-NEXT: // in Loop: Header=BB0_7 Depth=1
+; CHECK-NEXT: and.b32 %r15, %r14, %r2;
+; CHECK-NEXT: setp.ne.s32 %p8, %r66, %r15;
+; CHECK-NEXT: mov.b32 %r66, %r15;
+; CHECK-NEXT: @%p8 bra $L__BB0_7;
+; CHECK-NEXT: $L__BB0_9: // %partword.cmpxchg.end11
+; CHECK-NEXT: fence.acq_rel.sys;
+; CHECK-NEXT: ld.shared::cluster.u32 %r57, [%rd1];
+; CHECK-NEXT: and.b32 %r67, %r57, %r2;
+; CHECK-NEXT: $L__BB0_10: // %partword.cmpxchg.loop3
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or.b32 %r58, %r67, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r18, [%rd1], %r58, %r67;
+; CHECK-NEXT: setp.eq.s32 %p9, %r18, %r58;
+; CHECK-NEXT: @%p9 bra $L__BB0_12;
+; CHECK-NEXT: // %bb.11: // %partword.cmpxchg.failure2
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=1
+; CHECK-NEXT: and.b32 %r19, %r18, %r2;
+; CHECK-NEXT: setp.ne.s32 %p10, %r67, %r19;
+; CHECK-NEXT: mov.b32 %r67, %r19;
+; CHECK-NEXT: @%p10 bra $L__BB0_10;
+; CHECK-NEXT: $L__BB0_12: // %partword.cmpxchg.end1
+; CHECK-NEXT: fence.acq_rel.sys;
+; CHECK-NEXT: fence.sc.sys;
+; CHECK-NEXT: ld.shared::cluster.u32 %r59, [%rd1];
+; CHECK-NEXT: and.b32 %r68, %r59, %r2;
+; CHECK-NEXT: $L__BB0_13: // %partword.cmpxchg.loop
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: or.b32 %r60, %r68, %r3;
+; CHECK-NEXT: atom.relaxed.shared::cluster.cas.b32 %r22, [%rd1], %r60, %r68;
+; CHECK-NEXT: setp.eq.s32 %p11, %r22, %r60;
+; CHECK-NEXT: @%p11 bra $L__BB0_15;
+; CHECK-NEXT: // %bb.14: // %partword.cmpxchg.failure
+; CHECK-NEXT: // in Loop: Header=BB0_13 Depth=1
+; CHECK-NEXT: and.b32 %r23, %r22, %r2;
+; CHECK-NEXT: setp.ne.s32 %p12, %r68, %r23;
+; CHECK-NEXT: mov.b32 %r68, %r23;
+; CHECK-NEXT: @%p12 bra $L__BB0_13;
+; CHECK-NEXT: $L__BB0_15: // %partword.cmpxchg.end
+; CHECK-NEXT: fence.acq_rel.sys;
+; CHECK-NEXT: selp.u32 %r61, 1, 0, %p1;
+; CHECK-NEXT: selp.u32 %r62, 1, 0, %p2;
+; CHECK-NEXT: add.s32 %r63, %r61, %r62;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r63;
+; CHECK-NEXT: ret;
+entry:
+ %0 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
+ %1 = xor i32 %0, 1
+ %2 = tail call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %ptr)
+ %3 = tail call ptr @llvm.nvvm.mapa(ptr %ptr, i32 %1)
+ %4 = tail call i1 @llvm.nvvm.isspacep.shared.cluster(ptr %3)
+ %dsmem_ptr = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %smem_ptr, i32 %1)
+
+ ; Floating point atomic operations
+ %5 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, half 1.000000e+00 seq_cst
+ %6 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, bfloat 1.000000e+00 seq_cst
+ %7 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, float 1.000000e+00 seq_cst
+ %8 = atomicrmw fadd ptr addrspace(7) %dsmem_ptr, double 1.000000e+00 seq_cst
+
+ ; Integer add operations
+ %9 = atomicrmw add ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %10 = atomicrmw add ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Exchange operations
+ %11 = atomicrmw xchg ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %12 = atomicrmw xchg ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Min operations (signed and unsigned)
+ %13 = atomicrmw min ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %14 = atomicrmw min ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %15 = atomicrmw umin ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %16 = atomicrmw umin ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Max operations (signed and unsigned)
+ %17 = atomicrmw max ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %18 = atomicrmw max ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %19 = atomicrmw umax ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %20 = atomicrmw umax ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Inc/Dec operations (32-bit only)
+ %21 = atomicrmw uinc_wrap ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %22 = atomicrmw udec_wrap ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+
+ ; Bitwise operations
+ %23 = atomicrmw and ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %24 = atomicrmw and ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %25 = atomicrmw or ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %26 = atomicrmw or ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+ %27 = atomicrmw xor ptr addrspace(7) %dsmem_ptr, i32 1 monotonic
+ %28 = atomicrmw xor ptr addrspace(7) %dsmem_ptr, i64 1 monotonic
+
+ ; Compare-exchange operation - all memory ordering combinations for 32-bit
+ %29 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 monotonic monotonic
+ %30 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acquire monotonic
+ %31 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acquire acquire
+ %32 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 release monotonic
+ %33 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acq_rel monotonic
+ %34 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 acq_rel acquire
+ %35 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst monotonic
+ %36 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst acquire
+ %37 = cmpxchg ptr addrspace(7) %dsmem_ptr, i32 1, i32 0 seq_cst seq_cst
+
+ ; Compare-exchange operation - all memory ordering combinations for 64-bit
+ %38 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 monotonic monotonic
+ %39 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acquire monotonic
+ %40 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acquire acquire
+ %41 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 release monotonic
+ %42 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acq_rel monotonic
+ %43 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 acq_rel acquire
+ %44 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst monotonic
+ %45 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst acquire
+ %46 = cmpxchg ptr addrspace(7) %dsmem_ptr, i64 1, i64 0 seq_cst seq_cst
+
+ ; Compare-exchange operation - 16-bit
+ %47 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 monotonic monotonic
+ %48 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 acquire acquire
+ %49 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 release monotonic
+ %50 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 acq_rel acquire
+ %51 = cmpxchg ptr addrspace(7) %dsmem_ptr, i16 1, i16 0 seq_cst seq_cst
+
+ ; Return value preserves the isspacep test results
+ %52 = zext i1 %2 to i32
+ %53 = zext i1 %4 to i32
+ %ret = add i32 %52, %53
+ ret i32 %ret
+}
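
As an illustrative sketch (not part of the patch), this is the usage pattern the new test exercises: a CTA-local shared pointer is mapped to the matching offset in a peer CTA of the cluster via llvm.nvvm.mapa.shared.cluster, and the resulting addrspace(7) pointer is used for atomics, which lower to the atom.shared::cluster.* forms shown in the CHECK lines above. Function and value names below are invented for the example.

declare ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)

define i32 @shared_cluster_atomic_add(ptr addrspace(3) %smem, i32 %peer_rank, i32 %v) {
  ; Map the CTA-local shared pointer into the peer CTA's shared memory;
  ; the result lives in the new shared::cluster address space (7).
  %remote = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %smem, i32 %peer_rank)
  ; Atomics on addrspace(7) pointers select atom.shared::cluster.* instructions.
  %old = atomicrmw add ptr addrspace(7) %remote, i32 %v monotonic
  ret i32 %old
}
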
diff --git a/llvm/test/CodeGen/NVPTX/nvptx-aa.ll b/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
index 074e741dc3e94..c1071f4909986 100644
--- a/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
+++ b/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
@@ -18,25 +18,33 @@ target triple = "nvptx64-nvidia-cuda"
; CHECK-ALIAS: NoAlias: i8 addrspace(1)* %global, i8 addrspace(5)* %local
; CHECK-ALIAS: NoAlias: i8 addrspace(5)* %local, i8 addrspace(3)* %shared
; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(5)* %local
+; CHECK-ALIAS: MayAlias: i8 addrspace(7)* %dshared, i8* %gen
+; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(1)* %global
+; CHECK-ALIAS: MayAlias: i8 addrspace(7)* %dshared, i8 addrspace(3)* %shared
+; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(7)* %dshared
+; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(5)* %local
; CHECK-ALIAS: MayAlias: i8* %gen, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(1)* %global, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(101)* %param, i8 addrspace(3)* %shared
; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(5)* %local, i8 addrspace(101)* %param
+; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(101)* %param
-define i8 @test_alias(ptr %gen, ptr addrspace(1) %global, ptr addrspace(3) %shared, ptr addrspace(4) %const, ptr addrspace(5) %local) {
+define i8 @test_alias(ptr %gen, ptr addrspace(1) %global, ptr addrspace(3) %shared, ptr addrspace(4) %const, ptr addrspace(5) %local, ptr addrspace(7) %dshared) {
%param = addrspacecast ptr %gen to ptr addrspace(101)
%v1 = load i8, ptr %gen
%v2 = load i8, ptr addrspace(1) %global
%v3 = load i8, ptr addrspace(3) %shared
%v4 = load i8, ptr addrspace(4) %const
%v5 = load i8, ptr addrspace(5) %local
- %v6 = load i8, ptr addrspace(101) %param
+ %v6 = load i8, ptr addrspace(7) %dshared
+ %v7 = load i8, ptr addrspace(101) %param
%res1 = add i8 %v1, %v2
%res2 = add i8 %res1, %v3
%res3 = add i8 %res2, %v4
%res4 = add i8 %res3, %v5
%res5 = add i8 %res4, %v6
+ %res6 = add i8 %res4, %v7
ret i8 %res5
}
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
index a9270c6f52344..fceaaa36cf8e8 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
@@ -43,7 +43,10 @@ enum NVVMMemorySpace {
/// Tensor memory space identifier.
/// Tensor memory is available only in arch-accelerated
/// variants from sm100 onwards.
- kTensorMemorySpace = 6
+ kTensorMemorySpace = 6,
+ /// Distributed shared memory space identifier.
+ /// Distributed shared memory is available only in sm80+.
+ kDSharedMemorySpace = 7,
};
/// Return the element type and number of elements associated with a wmma matrix
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 0a6e66919f021..9f66e77085ce7 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -25,6 +25,7 @@ def LLVM_PointerGeneric : LLVM_PointerInAddressSpace<0>;
def LLVM_PointerGlobal : LLVM_PointerInAddressSpace<1>;
def LLVM_PointerShared : LLVM_PointerInAddressSpace<3>;
def LLVM_PointerTensor : LLVM_PointerInAddressSpace<6>;
+def LLVM_PointerDShared : LLVM_PointerInAddressSpace<7>;
//===----------------------------------------------------------------------===//
// NVVM dialect definitions
@@ -2252,7 +2253,7 @@ def NVVM_CpAsyncBulkGlobalToSharedClusterOp :
}];
let arguments = (ins
- LLVM_PointerShared:$dstMem,
+ LLVM_PointerDShared:$dstMem,
LLVM_PointerGlobal:$srcMem,
LLVM_PointerShared:$mbar,
I32:$size,
@@ -2309,7 +2310,7 @@ def NVVM_CpAsyncBulkSharedCTAToSharedClusterOp :
}];
let arguments = (ins
- LLVM_PointerShared:$dstMem,
+ LLVM_PointerDShared:$dstMem,
LLVM_PointerShared:$srcMem,
LLVM_PointerShared:$mbar,
I32:$size);
diff --git a/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir b/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir
index 0e3f98a134491..39b703d9a9677 100644
--- a/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir
+++ b/mlir/test/Target/LLVMIR/nvvm/tma_bulk_copy.mlir
@@ -1,25 +1,25 @@
// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
// CHECK-LABEL: @llvm_nvvm_cp_async_bulk_global_to_shared_cluster
-llvm.func @llvm_nvvm_cp_async_bulk_global_to_shared_cluster(%dst : !llvm.ptr<3>, %src : !llvm.ptr<1>, %mbar : !llvm.ptr<3>, %size : i32, %mc : i16, %ch : i64) {
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST:.*]], ptr addrspace(3) %[[MBAR:.*]], ptr addrspace(1) %[[SRC:.*]], i32 %[[SIZE:.*]], i16 0, i64 0, i1 false, i1 false)
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 0, i64 %[[CH:.*]], i1 false, i1 true)
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC:.*]], i64 0, i1 true, i1 false)
- // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC]], i64 %[[CH]], i1 true, i1 true)
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size : !llvm.ptr<3>, !llvm.ptr<1>
+llvm.func @llvm_nvvm_cp_async_bulk_global_to_shared_cluster(%dst : !llvm.ptr<7>, %src : !llvm.ptr<1>, %mbar : !llvm.ptr<3>, %size : i32, %mc : i16, %ch : i64) {
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST:.*]], ptr addrspace(3) %[[MBAR:.*]], ptr addrspace(1) %[[SRC:.*]], i32 %[[SIZE:.*]], i16 0, i64 0, i1 false, i1 false)
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 0, i64 %[[CH:.*]], i1 false, i1 true)
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC:.*]], i64 0, i1 true, i1 false)
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %[[DST]], ptr addrspace(3) %[[MBAR]], ptr addrspace(1) %[[SRC]], i32 %[[SIZE]], i16 %[[MC]], i64 %[[CH]], i1 true, i1 true)
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size : !llvm.ptr<7>, !llvm.ptr<1>
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size l2_cache_hint = %ch : !llvm.ptr<3>, !llvm.ptr<1>
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size l2_cache_hint = %ch : !llvm.ptr<7>, !llvm.ptr<1>
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc : !llvm.ptr<3>, !llvm.ptr<1>
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc : !llvm.ptr<7>, !llvm.ptr<1>
- nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc l2_cache_hint = %ch : !llvm.ptr<3>, !llvm.ptr<1>
+ nvvm.cp.async.bulk.shared.cluster.global %dst, %src, %mbar, %size multicast_mask = %mc l2_cache_hint = %ch : !llvm.ptr<7>, !llvm.ptr<1>
llvm.return
}
// CHECK-LABEL: @llvm_nvvm_cp_async_bulk_shared_cta_to_shared_cluster
-llvm.func @llvm_nvvm_cp_async_bulk_shared_cta_to_shared_cluster(%dst : !llvm.ptr<3>, %src : !llvm.ptr<3>, %mbar : !llvm.ptr<3>, %size : i32) {
- // CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %0, ptr addrspace(3) %2, ptr addrspace(3) %1, i32 %3)
- nvvm.cp.async.bulk.shared.cluster.shared.cta %dst, %src, %mbar, %size : !llvm.ptr<3>, !llvm.ptr<3>
+llvm.func @llvm_nvvm_cp_async_bulk_shared_cta_to_shared_cluster(%dst : !llvm.ptr<7>, %src : !llvm.ptr<3>, %mbar : !llvm.ptr<3>, %size : i32) {
+ // CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %0, ptr addrspace(3) %2, ptr addrspace(3) %1, i32 %3)
+ nvvm.cp.async.bulk.shared.cluster.shared.cta %dst, %src, %mbar, %size : !llvm.ptr<7>, !llvm.ptr<3>
llvm.return
}
>From 250db657d48e1757e9a832473c532ade2e8afeaa Mon Sep 17 00:00:00 2001
From: Modi Mo <mmo at nvidia.com>
Date: Fri, 11 Apr 2025 19:30:32 -0700
Subject: [PATCH 2/2] change to shared_cluster, update tests
---
llvm/include/llvm/IR/IntrinsicsNVVM.td | 54 +++++++--------
llvm/include/llvm/Support/NVPTXAddrSpace.h | 2 +-
llvm/lib/IR/AutoUpgrade.cpp | 8 +--
.../NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp | 2 +-
llvm/lib/Target/NVPTX/NVPTX.h | 2 +-
llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp | 5 +-
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 16 +++--
llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 32 ++++-----
llvm/lib/Target/NVPTX/NVPTXUtilities.h | 2 +-
.../Assembler/auto_upgrade_nvvm_intrinsics.ll | 68 +++++++++----------
.../NVPTX/distributed-shared-cluster.ll | 4 +-
llvm/test/CodeGen/NVPTX/nvptx-aa.ll | 16 ++---
.../include/mlir/Dialect/LLVMIR/NVVMDialect.h | 2 +-
mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td | 6 +-
14 files changed, 111 insertions(+), 108 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index f053fa6e2bf22..0e606dc995a6b 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -127,11 +127,11 @@
// * llvm.nvvm.atomic.load.inc.32 --> atomicrmw uinc_wrap
// * llvm.nvvm.atomic.load.dec.32 --> atomicrmw udec_wrap
-def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
-def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
-def llvm_local_ptr_ty : LLVMQualPointerType<5>; // (local)ptr
-def llvm_tmem_ptr_ty : LLVMQualPointerType<6>; // (tensor memory)ptr
-def llvm_dshared_ptr_ty : LLVMQualPointerType<7>; // (dshared)ptr
+def llvm_global_ptr_ty : LLVMQualPointerType<1>; // (global)ptr
+def llvm_shared_ptr_ty : LLVMQualPointerType<3>; // (shared)ptr
+def llvm_local_ptr_ty : LLVMQualPointerType<5>; // (local)ptr
+def llvm_tmem_ptr_ty : LLVMQualPointerType<6>; // (tensor memory)ptr
+def llvm_shared_cluster_ptr_ty : LLVMQualPointerType<7>; // (shared_cluster)ptr
//
// MISC
@@ -692,15 +692,15 @@ class CP_ASYNC_BULK_TENSOR_G2S_INTR<int dim, string mode> {
list<LLVMType> Im2ColOffsetsTy = !listsplat(llvm_i16_ty, NumIm2ColOffsets);
list<LLVMType> TensorDimsTy = !listsplat(llvm_i32_ty, dim);
list<LLVMType> ArgsTy = !listconcat(
- [llvm_dshared_ptr_ty, // dst_smem_ptr
- llvm_shared_ptr_ty, // mbarrier_smem_ptr
- llvm_ptr_ty], // tensormap_ptr
- TensorDimsTy, // actual tensor dims
- Im2ColOffsetsTy, // im2col offsets
- [llvm_i16_ty, // cta_mask
- llvm_i64_ty, // cache_hint
- llvm_i1_ty, // Flag for cta_mask
- llvm_i1_ty] // Flag for cache_hint
+ [llvm_shared_cluster_ptr_ty, // dst_shared_cluster_ptr
+ llvm_shared_ptr_ty, // mbarrier_smem_ptr
+ llvm_ptr_ty], // tensormap_ptr
+ TensorDimsTy, // actual tensor dims
+ Im2ColOffsetsTy, // im2col offsets
+ [llvm_i16_ty, // cta_mask
+ llvm_i64_ty, // cache_hint
+ llvm_i1_ty, // Flag for cta_mask
+ llvm_i1_ty] // Flag for cache_hint
);
int TempFlagsStartIdx = !add(dim, 5);
@@ -5088,7 +5088,7 @@ def int_nvvm_mapa
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa">;
def int_nvvm_mapa_shared_cluster
- : DefaultAttrsIntrinsic<[llvm_dshared_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
+ : DefaultAttrsIntrinsic<[llvm_shared_cluster_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
"llvm.nvvm.mapa.shared.cluster">;
def int_nvvm_getctarank
@@ -5188,14 +5188,14 @@ def int_nvvm_discard_L2 : DefaultAttrsIntrinsic<[],
// From Global to Shared Cluster
def int_nvvm_cp_async_bulk_global_to_shared_cluster
: DefaultAttrsIntrinsic<[],
- [llvm_dshared_ptr_ty, // dst_dsmem_ptr
- llvm_shared_ptr_ty, // mbarrier_ptr
- llvm_global_ptr_ty, // src_gmem_ptr
- llvm_i32_ty, // copy_size
- llvm_i16_ty, // cta_mask
- llvm_i64_ty, // cache_hint
- llvm_i1_ty, // Flag for cta_mask
- llvm_i1_ty], // Flag for cache_hint
+ [llvm_shared_cluster_ptr_ty, // dst_shared_cluster_ptr
+ llvm_shared_ptr_ty, // mbarrier_ptr
+ llvm_global_ptr_ty, // src_gmem_ptr
+ llvm_i32_ty, // copy_size
+ llvm_i16_ty, // cta_mask
+ llvm_i64_ty, // cache_hint
+ llvm_i1_ty, // Flag for cta_mask
+ llvm_i1_ty], // Flag for cache_hint
[IntrConvergent, IntrArgMemOnly,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<2>>,
NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
@@ -5205,10 +5205,10 @@ def int_nvvm_cp_async_bulk_global_to_shared_cluster
// From Shared CTA to Shared Cluster
def int_nvvm_cp_async_bulk_shared_cta_to_cluster
: DefaultAttrsIntrinsic<[],
- [llvm_dshared_ptr_ty, // dst_dsmem_ptr
- llvm_shared_ptr_ty, // mbarrier_ptr
- llvm_shared_ptr_ty, // src_smem_ptr
- llvm_i32_ty], // copy_size
+ [llvm_shared_cluster_ptr_ty, // dst_shared_cluster_ptr
+ llvm_shared_ptr_ty, // mbarrier_ptr
+ llvm_shared_ptr_ty, // src_smem_ptr
+ llvm_i32_ty], // copy_size
[IntrConvergent, IntrArgMemOnly,
WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<2>>,
NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
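
As a rough illustration of the updated signature (an assumed example, not copied from the patch's tests), a direct call to the global-to-shared::cluster bulk copy now passes its destination as an addrspace(7) pointer; the names below are made up.

declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)

define void @bulk_copy_example(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size) {
  ; No multicast mask and no cache hint: both flag operands are false.
  call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %mbar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
  ret void
}
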
diff --git a/llvm/include/llvm/Support/NVPTXAddrSpace.h b/llvm/include/llvm/Support/NVPTXAddrSpace.h
index a3eac31f2e5e9..04f74c34787cc 100644
--- a/llvm/include/llvm/Support/NVPTXAddrSpace.h
+++ b/llvm/include/llvm/Support/NVPTXAddrSpace.h
@@ -25,7 +25,7 @@ enum AddressSpace : unsigned {
ADDRESS_SPACE_CONST = 4,
ADDRESS_SPACE_LOCAL = 5,
ADDRESS_SPACE_TENSOR = 6,
- ADDRESS_SPACE_DSHARED = 7,
+ ADDRESS_SPACE_SHARED_CLUSTER = 7,
ADDRESS_SPACE_PARAM = 101,
};
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 7482014d3c168..3da6bc95f0c3b 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -939,8 +939,8 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
return false; // No other 'arm.*', 'aarch64.*'.
}
-static Intrinsic::ID shouldUpgradeNVPTXDSharedIntrinsic(Function *F,
- StringRef Name) {
+static Intrinsic::ID shouldUpgradeNVPTXSharedClusterIntrinsic(Function *F,
+ StringRef Name) {
if (Name.consume_front("mapa.shared.cluster"))
if (F->getReturnType()->getPointerAddressSpace() ==
NVPTXAS::ADDRESS_SPACE_SHARED)
@@ -1327,7 +1327,7 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
}
// Upgrade Distributed Shared Memory Intrinsics
- Intrinsic::ID IID = shouldUpgradeNVPTXDSharedIntrinsic(F, Name);
+ Intrinsic::ID IID = shouldUpgradeNVPTXSharedClusterIntrinsic(F, Name);
if (IID != Intrinsic::not_intrinsic) {
rename(F);
NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID);
@@ -4783,7 +4783,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
Args[0] = Builder.CreateAddrSpaceCast(
Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_GENERIC));
Args[0] = Builder.CreateAddrSpaceCast(
- Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_DSHARED));
+ Args[0], Builder.getPtrTy(NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER));
NewCall = Builder.CreateCall(NewFn, Args);
NewCall->takeName(CI);
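
A minimal sketch of what this upgrade does at the IR level, assuming an old-style call whose destination is still in addrspace(3) (value names here are invented; the authoritative CHECK lines are in auto_upgrade_nvvm_intrinsics.ll below): the destination operand is cast to generic and then to the new shared::cluster space before the call is re-emitted against the new declaration.

; Before auto-upgrade (old signature):
;   call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
; After auto-upgrade (roughly):
;   %dst.gen     = addrspacecast ptr addrspace(3) %dst to ptr
;   %dst.cluster = addrspacecast ptr %dst.gen to ptr addrspace(7)
;   call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst.cluster, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
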
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
index 9ab59c1c144f3..d0706ac5677ab 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
@@ -288,7 +288,7 @@ void NVPTXInstPrinter::printLdStCode(const MCInst *MI, int OpNum,
case NVPTX::AddressSpace::Global:
case NVPTX::AddressSpace::Const:
case NVPTX::AddressSpace::Shared:
- case NVPTX::AddressSpace::Dshared:
+ case NVPTX::AddressSpace::SharedCluster:
case NVPTX::AddressSpace::Param:
case NVPTX::AddressSpace::Local:
O << "." << A;
diff --git a/llvm/lib/Target/NVPTX/NVPTX.h b/llvm/lib/Target/NVPTX/NVPTX.h
index c20c522f36bd3..cf21ad991ccdf 100644
--- a/llvm/lib/Target/NVPTX/NVPTX.h
+++ b/llvm/lib/Target/NVPTX/NVPTX.h
@@ -176,7 +176,7 @@ enum AddressSpace : AddressSpaceUnderlyingType {
Shared = 3,
Const = 4,
Local = 5,
- Dshared = 7,
+ SharedCluster = 7,
// NVPTX Backend Private:
Param = 101
diff --git a/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp b/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
index 60bc22f5f589c..a579783802aa2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAliasAnalysis.cpp
@@ -87,8 +87,9 @@ static AliasResult::Kind getAliasResult(unsigned AS1, unsigned AS2) {
// rules once it is added.
// Distributed shared memory aliases with shared memory.
- if (((AS1 == ADDRESS_SPACE_SHARED) && (AS2 == ADDRESS_SPACE_DSHARED)) ||
- ((AS1 == ADDRESS_SPACE_DSHARED) && (AS2 == ADDRESS_SPACE_SHARED)))
+ if (((AS1 == ADDRESS_SPACE_SHARED) &&
+ (AS2 == ADDRESS_SPACE_SHARED_CLUSTER)) ||
+ ((AS1 == ADDRESS_SPACE_SHARED_CLUSTER) && (AS2 == ADDRESS_SPACE_SHARED)))
return AliasResult::MayAlias;
return (AS1 == AS2 ? AliasResult::MayAlias : AliasResult::NoAlias);
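
A small sketch of what this rule means for IR (an assumed example, not one of the queries in nvptx-aa.ll): a shared::cluster pointer may alias a plain shared pointer, but stays disjoint from the other spaces.

define void @aa_sketch(ptr addrspace(7) %cluster, ptr addrspace(3) %cta, ptr addrspace(1) %glob) {
  store i8 0, ptr addrspace(7) %cluster  ; MayAlias with the addrspace(3) store below
  store i8 1, ptr addrspace(3) %cta
  store i8 2, ptr addrspace(1) %glob     ; NoAlias with the addrspace(7) store
  ret void
}
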
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 34ddfd3c411a8..5b27817181871 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -513,8 +513,8 @@ static std::optional<unsigned> convertAS(unsigned AS) {
return NVPTX::AddressSpace::Global;
case llvm::ADDRESS_SPACE_SHARED:
return NVPTX::AddressSpace::Shared;
- case llvm::ADDRESS_SPACE_DSHARED:
- return NVPTX::AddressSpace::Dshared;
+ case llvm::ADDRESS_SPACE_SHARED_CLUSTER:
+ return NVPTX::AddressSpace::SharedCluster;
case llvm::ADDRESS_SPACE_GENERIC:
return NVPTX::AddressSpace::Generic;
case llvm::ADDRESS_SPACE_PARAM:
@@ -661,7 +661,7 @@ getOperationOrderings(MemSDNode *N, const NVPTXSubtarget *Subtarget) {
(CodeAddrSpace == NVPTX::AddressSpace::Generic ||
CodeAddrSpace == NVPTX::AddressSpace::Global ||
CodeAddrSpace == NVPTX::AddressSpace::Shared ||
- CodeAddrSpace == NVPTX::AddressSpace::Dshared);
+ CodeAddrSpace == NVPTX::AddressSpace::SharedCluster);
if (!AddrGenericOrGlobalOrShared)
return NVPTX::Ordering::NotAtomic;
@@ -982,8 +982,9 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
case ADDRESS_SPACE_SHARED:
Opc = TM.is64Bit() ? NVPTX::cvta_shared_64 : NVPTX::cvta_shared;
break;
- case ADDRESS_SPACE_DSHARED:
- Opc = TM.is64Bit() ? NVPTX::cvta_dshared_64 : NVPTX::cvta_dshared;
+ case ADDRESS_SPACE_SHARED_CLUSTER:
+ Opc = TM.is64Bit() ? NVPTX::cvta_shared_cluster_64
+ : NVPTX::cvta_shared_cluster;
break;
case ADDRESS_SPACE_CONST:
Opc = TM.is64Bit() ? NVPTX::cvta_const_64 : NVPTX::cvta_const;
@@ -1007,8 +1008,9 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
case ADDRESS_SPACE_SHARED:
Opc = TM.is64Bit() ? NVPTX::cvta_to_shared_64 : NVPTX::cvta_to_shared;
break;
- case ADDRESS_SPACE_DSHARED:
- Opc = TM.is64Bit() ? NVPTX::cvta_to_dshared_64 : NVPTX::cvta_to_dshared;
+ case ADDRESS_SPACE_SHARED_CLUSTER:
+ Opc = TM.is64Bit() ? NVPTX::cvta_to_shared_cluster_64
+ : NVPTX::cvta_to_shared_cluster;
break;
case ADDRESS_SPACE_CONST:
Opc = TM.is64Bit() ? NVPTX::cvta_to_const_64 : NVPTX::cvta_to_const;
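
A minimal sketch, assuming these opcodes emit the cvta strings their TableGen definitions suggest (functions invented for illustration), of the address-space casts the new cases handle:

define ptr @cluster_to_generic(ptr addrspace(7) %p) {
  ; Expected to select cvta.shared::cluster.u64 on 64-bit targets.
  %g = addrspacecast ptr addrspace(7) %p to ptr
  ret ptr %g
}

define ptr addrspace(7) @generic_to_cluster(ptr %p) {
  ; Expected to select cvta.to.shared::cluster.u64 on 64-bit targets.
  %c = addrspacecast ptr %p to ptr addrspace(7)
  ret ptr addrspace(7) %c
}
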
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 19b370e4ce6f9..a6307ed6ec84c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -33,8 +33,8 @@ def AS_match {
code shared = [{
return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_SHARED);
}];
- code dshared = [{
- return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_DSHARED);
+ code shared_cluster = [{
+ return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_SHARED_CLUSTER);
}];
code global = [{
return ChkMemSDNodeAddressSpace(N, llvm::ADDRESS_SPACE_GLOBAL);
@@ -1982,8 +1982,8 @@ class ATOMIC_GLOBAL_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.global>;
class ATOMIC_SHARED_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.shared>;
-class ATOMIC_DSHARED_CHK <dag frag>
- : PatFrag<!setdagop(frag, ops), frag, AS_match.dshared>;
+class ATOMIC_SHARED_CLUSTER_CHK <dag frag>
+ : PatFrag<!setdagop(frag, ops), frag, AS_match.shared_cluster>;
class ATOMIC_GENERIC_CHK <dag frag>
: PatFrag<!setdagop(frag, ops), frag, AS_match.generic>;
@@ -2038,7 +2038,7 @@ multiclass F_ATOMIC_2_AS<RegTyInfo t, SDPatternOperator frag, string op_str, lis
defvar frag_pat = (frag node:$a, node:$b);
defm _G : F_ATOMIC_2<t, "", ".global", op_str, ATOMIC_GLOBAL_CHK<frag_pat>, preds>;
defm _S : F_ATOMIC_2<t, "", ".shared", op_str, ATOMIC_SHARED_CHK<frag_pat>, preds>;
- defm _DS : F_ATOMIC_2<t, "", ".shared::cluster", op_str, ATOMIC_DSHARED_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
+ defm _S_C : F_ATOMIC_2<t, "", ".shared::cluster", op_str, ATOMIC_SHARED_CLUSTER_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
defm _GEN : F_ATOMIC_2<t, "", "", op_str, ATOMIC_GENERIC_CHK<frag_pat>, preds>;
}
@@ -2046,7 +2046,7 @@ multiclass F_ATOMIC_3_AS<RegTyInfo t, SDPatternOperator frag, string sem_str, st
defvar frag_pat = (frag node:$a, node:$b, node:$c);
defm _G : F_ATOMIC_3<t, sem_str, ".global", op_str, ATOMIC_GLOBAL_CHK<frag_pat>, preds>;
defm _S : F_ATOMIC_3<t, sem_str, ".shared", op_str, ATOMIC_SHARED_CHK<frag_pat>, preds>;
- defm _DS : F_ATOMIC_3<t, sem_str, ".shared::cluster", op_str, ATOMIC_DSHARED_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
+ defm _S_C : F_ATOMIC_3<t, sem_str, ".shared::cluster", op_str, ATOMIC_SHARED_CLUSTER_CHK<frag_pat>, !listconcat([hasSM<80>], preds)>;
defm _GEN : F_ATOMIC_3<t, sem_str, "", op_str, ATOMIC_GENERIC_CHK<frag_pat>, preds>;
}
@@ -2341,17 +2341,17 @@ multiclass G_TO_NG<string Str> {
"cvta.to." # Str # ".u64 \t$result, $src;", []>;
}
-defm cvta_local : NG_TO_G<"local">;
-defm cvta_shared : NG_TO_G<"shared">;
-defm cvta_dshared : NG_TO_G<"shared::cluster">;
-defm cvta_global : NG_TO_G<"global">;
-defm cvta_const : NG_TO_G<"const">;
+defm cvta_local : NG_TO_G<"local">;
+defm cvta_shared : NG_TO_G<"shared">;
+defm cvta_shared_cluster : NG_TO_G<"shared::cluster">;
+defm cvta_global : NG_TO_G<"global">;
+defm cvta_const : NG_TO_G<"const">;
-defm cvta_to_local : G_TO_NG<"local">;
-defm cvta_to_shared : G_TO_NG<"shared">;
-defm cvta_to_dshared : G_TO_NG<"shared::cluster">;
-defm cvta_to_global : G_TO_NG<"global">;
-defm cvta_to_const : G_TO_NG<"const">;
+defm cvta_to_local : G_TO_NG<"local">;
+defm cvta_to_shared : G_TO_NG<"shared">;
+defm cvta_to_shared_cluster : G_TO_NG<"shared::cluster">;
+defm cvta_to_global : G_TO_NG<"global">;
+defm cvta_to_const : G_TO_NG<"const">;
// nvvm.ptr.param.to.gen
defm cvta_param : NG_TO_G<"param">;
diff --git a/llvm/lib/Target/NVPTX/NVPTXUtilities.h b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
index 931ae9e78d381..f6c7b9d43486c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXUtilities.h
+++ b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
@@ -161,7 +161,7 @@ inline std::string AddressSpaceToString(AddressSpace A) {
return "const";
case AddressSpace::Shared:
return "shared";
- case AddressSpace::Dshared:
+ case AddressSpace::SharedCluster:
return "shared::cluster";
case AddressSpace::Param:
return "param";
diff --git a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
index 614fea7635653..65474899d33d1 100644
--- a/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
+++ b/llvm/test/Assembler/auto_upgrade_nvvm_intrinsics.ll
@@ -55,18 +55,20 @@ declare float @llvm.nvvm.ldg.global.f.f32.p0(ptr, i32)
declare i32 @llvm.nvvm.atomic.load.inc.32(ptr, i32)
declare i32 @llvm.nvvm.atomic.load.dec.32(ptr, i32)
-declare ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i64)
+declare ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3), i32)
declare void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i16, i64, i1, i1)
declare void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3), ptr addrspace(3), ptr addrspace(3), i32)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
-declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3), ptr addrspace(3), ptr addrspace(1), i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i64, i1, i1)
+
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
+declare void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tm, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch, i1 %f1, i1 %f2);
; CHECK-LABEL: @simple_upgrade
define void @simple_upgrade(i32 %a, i64 %b, i16 %c) {
@@ -253,46 +255,44 @@ define i32 @atomics(ptr %p0, i32 %a) {
}
; CHECK-LABEL: @nvvm_shared_cluster_intrinsics
-define void @nvvm_shared_cluster_intrinsics(ptr addrspace(3) %p0, i64 %offset) {
-; CHECK: %[[ASC1:.*]] = addrspacecast ptr addrspace(3) %p0 to ptr addrspace(7)
-; CHECK: %[[CALL:.*]] = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(7) %[[ASC1]], i64 %offset)
-; CHECK: %[[ASC2:.*]] = addrspacecast ptr addrspace(7) %[[CALL]] to ptr addrspace(3)
- %r = call ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p0, i64 %offset)
+define void @nvvm_shared_cluster_intrinsics(ptr addrspace(3) %p0, i32 %offset) {
+; CHECK: %r = call ptr addrspace(7) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p0, i32 %offset)
+ %r = call ptr addrspace(3) @llvm.nvvm.mapa.shared.cluster(ptr addrspace(3) %p0, i32 %offset)
ret void
}
; CHECK-LABEL: @nvvm_cp_async_bulk_intrinsics
define void @nvvm_cp_async_bulk_intrinsics(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, ptr addrspace(3) %src_shared, i32 %size) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
call void @llvm.nvvm.cp.async.bulk.global.to.shared.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 %size, i16 0, i64 0, i1 false, i1 false)
call void @llvm.nvvm.cp.async.bulk.shared.cta.to.cluster(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(3) %src_shared, i32 %size)
ret void
}
; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_im2col
-define void @nvvm_cp_async_bulk_tensor_g2s_im2col(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i16 0, i64 0, i1 false, i1 false)
+define void @nvvm_cp_async_bulk_tensor_g2s_im2col(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 %mc, i64 %ch) {
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(7) %6, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 %im2col0, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 %im2col0, i16 %im2col1, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.im2col.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %im2col0, i16 %im2col1, i16 %im2col2, i16 undef, i64 undef, i1 0, i1 0)
ret void
}
; CHECK-LABEL: @nvvm_cp_async_bulk_tensor_g2s_tile
-define void @nvvm_cp_async_bulk_tensor_g2s_tile(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src) {
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i16 0, i64 0, i1 false, i1 false)
-; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i16 0, i64 0, i1 false, i1 false)
- call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %dst, ptr addrspace(3) %bar, ptr addrspace(1) %src, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i16 0, i64 0, i1 false, i1 false)
+define void @nvvm_cp_async_bulk_tensor_g2s_tile(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 %mc, i64 %ch) {
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(7) %2, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(7) %4, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(7) %6, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(7) %8, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 false, i1 false)
+; CHECK: call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(7) %10, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 false, i1 false)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.1d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.2d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.3d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.4d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i16 undef, i64 undef, i1 0, i1 0)
+ call void @llvm.nvvm.cp.async.bulk.tensor.g2s.tile.5d(ptr addrspace(3) %d, ptr addrspace(3) %bar, ptr %tmap, i32 %d0, i32 %d1, i32 %d2, i32 %d3, i32 %d4, i16 undef, i64 undef, i1 0, i1 0)
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
index f499500a64ebf..aaad2b5dc880a 100644
--- a/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
+++ b/llvm/test/CodeGen/NVPTX/distributed-shared-cluster.ll
@@ -170,8 +170,8 @@ define i32 @test_distributed_shared_cluster(ptr %ptr, ptr addrspace(3) %smem_ptr
; CHECK-NEXT: @%p12 bra $L__BB0_13;
; CHECK-NEXT: $L__BB0_15: // %partword.cmpxchg.end
; CHECK-NEXT: fence.acq_rel.sys;
-; CHECK-NEXT: selp.u32 %r61, 1, 0, %p1;
-; CHECK-NEXT: selp.u32 %r62, 1, 0, %p2;
+; CHECK-NEXT: selp.b32 %r61, 1, 0, %p1;
+; CHECK-NEXT: selp.b32 %r62, 1, 0, %p2;
; CHECK-NEXT: add.s32 %r63, %r61, %r62;
; CHECK-NEXT: st.param.b32 [func_retval0], %r63;
; CHECK-NEXT: ret;
diff --git a/llvm/test/CodeGen/NVPTX/nvptx-aa.ll b/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
index c1071f4909986..924220326c341 100644
--- a/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
+++ b/llvm/test/CodeGen/NVPTX/nvptx-aa.ll
@@ -18,26 +18,26 @@ target triple = "nvptx64-nvidia-cuda"
; CHECK-ALIAS: NoAlias: i8 addrspace(1)* %global, i8 addrspace(5)* %local
; CHECK-ALIAS: NoAlias: i8 addrspace(5)* %local, i8 addrspace(3)* %shared
; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(5)* %local
-; CHECK-ALIAS: MayAlias: i8 addrspace(7)* %dshared, i8* %gen
-; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(1)* %global
-; CHECK-ALIAS: MayAlias: i8 addrspace(7)* %dshared, i8 addrspace(3)* %shared
-; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(7)* %dshared
-; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(5)* %local
+; CHECK-ALIAS: MayAlias: i8* %gen, i8 addrspace(7)* %shared_cluster
+; CHECK-ALIAS: NoAlias: i8 addrspace(1)* %global, i8 addrspace(7)* %shared_cluster
+; CHECK-ALIAS: MayAlias: i8 addrspace(3)* %shared, i8 addrspace(7)* %shared_cluster
+; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(7)* %shared_cluster
+; CHECK-ALIAS: NoAlias: i8 addrspace(5)* %local, i8 addrspace(7)* %shared_cluster
; CHECK-ALIAS: MayAlias: i8* %gen, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(1)* %global, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(101)* %param, i8 addrspace(3)* %shared
; CHECK-ALIAS: NoAlias: i8 addrspace(4)* %const, i8 addrspace(101)* %param
; CHECK-ALIAS: NoAlias: i8 addrspace(5)* %local, i8 addrspace(101)* %param
-; CHECK-ALIAS: NoAlias: i8 addrspace(7)* %dshared, i8 addrspace(101)* %param
+; CHECK-ALIAS: NoAlias: i8 addrspace(101)* %param, i8 addrspace(7)* %shared_cluster
-define i8 @test_alias(ptr %gen, ptr addrspace(1) %global, ptr addrspace(3) %shared, ptr addrspace(4) %const, ptr addrspace(5) %local, ptr addrspace(7) %dshared) {
+define i8 @test_alias(ptr %gen, ptr addrspace(1) %global, ptr addrspace(3) %shared, ptr addrspace(4) %const, ptr addrspace(5) %local, ptr addrspace(7) %shared_cluster) {
%param = addrspacecast ptr %gen to ptr addrspace(101)
%v1 = load i8, ptr %gen
%v2 = load i8, ptr addrspace(1) %global
%v3 = load i8, ptr addrspace(3) %shared
%v4 = load i8, ptr addrspace(4) %const
%v5 = load i8, ptr addrspace(5) %local
- %v6 = load i8, ptr addrspace(7) %dshared
+ %v6 = load i8, ptr addrspace(7) %shared_cluster
%v7 = load i8, ptr addrspace(101) %param
%res1 = add i8 %v1, %v2
%res2 = add i8 %res1, %v3
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
index fceaaa36cf8e8..cd6f3af2e9241 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMDialect.h
@@ -46,7 +46,7 @@ enum NVVMMemorySpace {
kTensorMemorySpace = 6,
/// Distributed shared memory space identifier.
/// Distributed shared memory is available only in sm80+.
- kDSharedMemorySpace = 7,
+ kSharedClusterMemorySpace = 7,
};
/// Return the element type and number of elements associated with a wmma matrix
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 9f66e77085ce7..5f41d428479b8 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -25,7 +25,7 @@ def LLVM_PointerGeneric : LLVM_PointerInAddressSpace<0>;
def LLVM_PointerGlobal : LLVM_PointerInAddressSpace<1>;
def LLVM_PointerShared : LLVM_PointerInAddressSpace<3>;
def LLVM_PointerTensor : LLVM_PointerInAddressSpace<6>;
-def LLVM_PointerDShared : LLVM_PointerInAddressSpace<7>;
+def LLVM_PointerSharedCluster : LLVM_PointerInAddressSpace<7>;
//===----------------------------------------------------------------------===//
// NVVM dialect definitions
@@ -2253,7 +2253,7 @@ def NVVM_CpAsyncBulkGlobalToSharedClusterOp :
}];
let arguments = (ins
- LLVM_PointerDShared:$dstMem,
+ LLVM_PointerSharedCluster:$dstMem,
LLVM_PointerGlobal:$srcMem,
LLVM_PointerShared:$mbar,
I32:$size,
@@ -2310,7 +2310,7 @@ def NVVM_CpAsyncBulkSharedCTAToSharedClusterOp :
}];
let arguments = (ins
- LLVM_PointerDShared:$dstMem,
+ LLVM_PointerSharedCluster:$dstMem,
LLVM_PointerShared:$srcMem,
LLVM_PointerShared:$mbar,
I32:$size);