[Mlir-commits] [mlir] [mlir] Decouple NVPTX target from CUDA toolkit presence (PR #93008)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Wed May 22 02:26:37 PDT 2024


https://github.com/tyb0807 created https://github.com/llvm/llvm-project/pull/93008

Currently we only allow serializing a GPU module if the CUDA toolkit is present, while in fact only serializing to the binary format requires the toolkit.

This change makes it possible to serialize a GPU module to the offload or assembly formats even when the CUDA toolkit is not provided. The only requirement for serializing a GPU module is that the NVPTX target was enabled when building LLVM.

>From b92aac4676ffdb52b005b856c16a616c52189ad8 Mon Sep 17 00:00:00 2001
From: Son Vu <son at brium.ai>
Date: Wed, 22 May 2024 11:22:06 +0200
Subject: [PATCH] [mlir] Decouple NVPTX target from CUDA toolkit presence

---
 mlir/lib/Target/LLVM/NVVM/Target.cpp          | 45 ++++++++++++-------
 .../Target/LLVM/SerializeNVVMTarget.cpp       |  2 +-
 2 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/mlir/lib/Target/LLVM/NVVM/Target.cpp b/mlir/lib/Target/LLVM/NVVM/Target.cpp
index e438ce84af1b5..02b1c1ae3a6b5 100644
--- a/mlir/lib/Target/LLVM/NVVM/Target.cpp
+++ b/mlir/lib/Target/LLVM/NVVM/Target.cpp
@@ -158,7 +158,7 @@ SerializeGPUModuleBase::loadBitcodeFiles(llvm::Module &module) {
   return std::move(bcFiles);
 }
 
-#if MLIR_ENABLE_CUDA_CONVERSIONS
+#if LLVM_HAS_NVPTX_TARGET
 namespace {
 class NVPTXSerializer : public SerializeGPUModuleBase {
 public:
@@ -167,6 +167,15 @@ class NVPTXSerializer : public SerializeGPUModuleBase {
 
   gpu::GPUModuleOp getOperation();
 
+  std::optional<SmallVector<char, 0>>
+  moduleToObject(llvm::Module &llvmModule) override;
+
+private:
+  // Target options.
+  gpu::TargetOptions targetOptions;
+
+#if MLIR_ENABLE_CUDA_CONVERSIONS
+public:
   // Compile PTX to cubin using `ptxas`.
   std::optional<SmallVector<char, 0>>
   compileToBinary(const std::string &ptxCode);
@@ -175,9 +184,6 @@ class NVPTXSerializer : public SerializeGPUModuleBase {
   std::optional<SmallVector<char, 0>>
   compileToBinaryNVPTX(const std::string &ptxCode);
 
-  std::optional<SmallVector<char, 0>>
-  moduleToObject(llvm::Module &llvmModule) override;
-
 private:
   using TmpFile = std::pair<llvm::SmallString<128>, llvm::FileRemover>;
 
@@ -190,9 +196,7 @@ class NVPTXSerializer : public SerializeGPUModuleBase {
   // 2. In the system PATH.
   // 3. The path from `getCUDAToolkitPath()`.
   std::optional<std::string> findTool(StringRef tool);
-
-  // Target options.
-  gpu::TargetOptions targetOptions;
+#endif // MLIR_ENABLE_CUDA_CONVERSIONS
 };
 } // namespace
 
@@ -201,6 +205,11 @@ NVPTXSerializer::NVPTXSerializer(Operation &module, NVVMTargetAttr target,
     : SerializeGPUModuleBase(module, target, targetOptions),
       targetOptions(targetOptions) {}
 
+gpu::GPUModuleOp NVPTXSerializer::getOperation() {
+  return dyn_cast<gpu::GPUModuleOp>(&SerializeGPUModuleBase::getOperation());
+}
+
+#if MLIR_ENABLE_CUDA_CONVERSIONS
 std::optional<NVPTXSerializer::TmpFile>
 NVPTXSerializer::createTemp(StringRef name, StringRef suffix) {
   llvm::SmallString<128> filename;
@@ -214,10 +223,6 @@ NVPTXSerializer::createTemp(StringRef name, StringRef suffix) {
   return TmpFile(filename, llvm::FileRemover(filename.c_str()));
 }
 
-gpu::GPUModuleOp NVPTXSerializer::getOperation() {
-  return dyn_cast<gpu::GPUModuleOp>(&SerializeGPUModuleBase::getOperation());
-}
-
 std::optional<std::string> NVPTXSerializer::findTool(StringRef tool) {
   // Find the `tool` path.
   // 1. Check the toolkit path given in the command line.
@@ -512,10 +517,11 @@ NVPTXSerializer::compileToBinaryNVPTX(const std::string &ptxCode) {
   return binary;
 }
 #endif // MLIR_ENABLE_NVPTXCOMPILER
+#endif // MLIR_ENABLE_CUDA_CONVERSIONS
 
 std::optional<SmallVector<char, 0>>
 NVPTXSerializer::moduleToObject(llvm::Module &llvmModule) {
-  // Return LLVM IR if the compilation target is offload.
+  // Return LLVM IR if the compilation target is `offload`.
 #define DEBUG_TYPE "serialize-to-llvm"
   LLVM_DEBUG({
     llvm::dbgs() << "LLVM IR for module: " << getOperation().getNameAttr()
@@ -549,7 +555,7 @@ NVPTXSerializer::moduleToObject(llvm::Module &llvmModule) {
   });
 #undef DEBUG_TYPE
 
-  // Return PTX if the compilation target is assembly.
+  // Return PTX if the compilation target is `assembly`.
   if (targetOptions.getCompilationTarget() ==
       gpu::CompilationTarget::Assembly) {
     // Make sure to include the null terminator.
@@ -557,6 +563,13 @@ NVPTXSerializer::moduleToObject(llvm::Module &llvmModule) {
     return SmallVector<char, 0>(bin.begin(), bin.end());
   }
 
+  // At this point, compilation target is either `binary` or `fatbinary`, which
+  // requires CUDA toolkit.
+  if (!(MLIR_ENABLE_CUDA_CONVERSIONS)) {
+    getOperation().emitError(
+        "CUDA toolkit not provided when trying to serialize GPU module.");
+    return std::nullopt;
+  }
   // Compile to binary.
 #if MLIR_ENABLE_NVPTXCOMPILER
   return compileToBinaryNVPTX(*serializedISA);
@@ -564,7 +577,7 @@ NVPTXSerializer::moduleToObject(llvm::Module &llvmModule) {
   return compileToBinary(*serializedISA);
 #endif // MLIR_ENABLE_NVPTXCOMPILER
 }
-#endif // MLIR_ENABLE_CUDA_CONVERSIONS
+#endif // LLVM_HAS_NVPTX_TARGET
 
 std::optional<SmallVector<char, 0>>
 NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
@@ -576,7 +589,7 @@ NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
     module->emitError("Module must be a GPU module.");
     return std::nullopt;
   }
-#if MLIR_ENABLE_CUDA_CONVERSIONS
+#if LLVM_HAS_NVPTX_TARGET
   NVPTXSerializer serializer(*module, cast<NVVMTargetAttr>(attribute), options);
   serializer.init();
   return serializer.run();
@@ -584,7 +597,7 @@ NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
   module->emitError(
       "The `NVPTX` target was not built. Please enable it when building LLVM.");
   return std::nullopt;
-#endif // MLIR_ENABLE_CUDA_CONVERSIONS
+#endif // LLVM_HAS_NVPTX_TARGET
 }
 
 Attribute
diff --git a/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp b/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
index cea49356538f0..a8fe20d52fb2a 100644
--- a/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
+++ b/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
@@ -30,7 +30,7 @@
 using namespace mlir;
 
 // Skip the test if the NVPTX target was not built.
-#if MLIR_ENABLE_CUDA_CONVERSIONS
+#if LLVM_HAS_NVPTX_TARGET
 #define SKIP_WITHOUT_NVPTX(x) x
 #else
 #define SKIP_WITHOUT_NVPTX(x) DISABLED_##x



More information about the Mlir-commits mailing list