[Mlir-commits] [mlir] [mlir] Don't rely on values of MLIR_(CUDA|ROCM)_CONVERSIONS_ENABLED. (PR #82988)
Ingo Müller
llvmlistbot at llvm.org
Mon Feb 26 03:46:40 PST 2024
https://github.com/ingomueller-net created https://github.com/llvm/llvm-project/pull/82988
This PR changes the `#if XXX == 1` sections for these macros to `#ifdef` sections so that the actual values the macros are defined with do not matter. This scheme is used for most (but not all) such macros in the MLIR code base, and the fact that these two macros did not follow it has led to bugs downstream (which assumed that `#ifdef` was being used and defined them to a value != 1).
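For illustration, here is a minimal, self-contained sketch (not part of the patch) of why the two guard styles behave differently when a downstream build defines the macro to a value other than 1, e.g. via `-DMLIR_CUDA_CONVERSIONS_ENABLED=2`:

```cpp
// guard_demo.cpp -- hypothetical standalone example, not part of the patch.
// Compile e.g. with: c++ -DMLIR_CUDA_CONVERSIONS_ENABLED=2 guard_demo.cpp
#include <cstdio>

int main() {
#if MLIR_CUDA_CONVERSIONS_ENABLED == 1
  // Only taken when the macro is defined to exactly 1.
  std::puts("value-based guard (#if X == 1): enabled");
#endif

#ifdef MLIR_CUDA_CONVERSIONS_ENABLED
  // Taken whenever the macro is defined, regardless of its value.
  std::puts("definedness-based guard (#ifdef X): enabled");
#endif
  return 0;
}
```

With `=2`, only the second message is printed, which is the downstream situation described above. The CMake change in the patch below defines the macros only when the respective backend is in `LLVM_TARGETS_TO_BUILD`, so `#ifdef` is the matching guard style.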
>From ea2280f75c0afe9f2e401c25cf46de3291e5fef8 Mon Sep 17 00:00:00 2001
From: Ingo Müller <ingomueller at google.com>
Date: Mon, 26 Feb 2024 11:37:13 +0000
Subject: [PATCH] [mlir] Don't rely on values of
MLIR_(CUDA|ROCM)_CONVERSIONS_ENABLED.
This PR changes the `#if XXX == 1` sections for these macros to `#ifdef`
sections so that the actual values the macros are defined with do not
matter. This scheme is used for most (but not all) such macros in the
MLIR code base, and the fact that these two macros did not follow it
has led to bugs downstream (which assumed that `#ifdef` was being used
and defined them to a value != 1).
---
mlir/CMakeLists.txt | 6 +++---
mlir/include/mlir/InitAllPasses.h | 2 +-
mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp | 2 +-
mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp | 4 ++--
mlir/lib/Target/LLVM/NVVM/Target.cpp | 10 +++++-----
mlir/lib/Target/LLVM/ROCDL/Target.cpp | 8 ++++----
mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp | 2 +-
mlir/unittests/Target/LLVM/SerializeROCDLTarget.cpp | 2 +-
8 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/mlir/CMakeLists.txt b/mlir/CMakeLists.txt
index 16c898bdeb6e00..d2a1d6c1340c0f 100644
--- a/mlir/CMakeLists.txt
+++ b/mlir/CMakeLists.txt
@@ -108,20 +108,20 @@ endif()
# is available
if ("NVPTX" IN_LIST LLVM_TARGETS_TO_BUILD)
set(MLIR_ENABLE_CUDA_CONVERSIONS 1)
+ # TODO: we should use a config.h file like LLVM does
+ add_definitions(-DMLIR_CUDA_CONVERSIONS_ENABLED)
else()
set(MLIR_ENABLE_CUDA_CONVERSIONS 0)
endif()
-# TODO: we should use a config.h file like LLVM does
-add_definitions(-DMLIR_CUDA_CONVERSIONS_ENABLED=${MLIR_ENABLE_CUDA_CONVERSIONS})
# Build the ROCm conversions and run according tests if the AMDGPU backend
# is available.
if ("AMDGPU" IN_LIST LLVM_TARGETS_TO_BUILD)
set(MLIR_ENABLE_ROCM_CONVERSIONS 1)
+ add_definitions(-DMLIR_ROCM_CONVERSIONS_ENABLED)
else()
set(MLIR_ENABLE_ROCM_CONVERSIONS 0)
endif()
-add_definitions(-DMLIR_ROCM_CONVERSIONS_ENABLED=${MLIR_ENABLE_ROCM_CONVERSIONS})
set(MLIR_ENABLE_CUDA_RUNNER 0 CACHE BOOL "Enable building the mlir CUDA runner")
set(MLIR_ENABLE_ROCM_RUNNER 0 CACHE BOOL "Enable building the mlir ROCm runner")
diff --git a/mlir/include/mlir/InitAllPasses.h b/mlir/include/mlir/InitAllPasses.h
index e28921619fe582..7f14577d98a561 100644
--- a/mlir/include/mlir/InitAllPasses.h
+++ b/mlir/include/mlir/InitAllPasses.h
@@ -96,7 +96,7 @@ inline void registerAllPasses() {
bufferization::registerBufferizationPipelines();
sparse_tensor::registerSparseTensorPipelines();
tosa::registerTosaToLinalgPipelines();
-#if MLIR_CUDA_CONVERSIONS_ENABLED
+#ifdef MLIR_CUDA_CONVERSIONS_ENABLED
gpu::registerGPUToNVVMPipeline();
#endif
}
diff --git a/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp b/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
index 935f0deaf9c8a6..31699ee44b6671 100644
--- a/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
+++ b/mlir/lib/Dialect/GPU/Pipelines/GPUToNVVMPipeline.cpp
@@ -38,7 +38,7 @@
using namespace mlir;
-#if MLIR_CUDA_CONVERSIONS_ENABLED
+#ifdef MLIR_CUDA_CONVERSIONS_ENABLED
namespace {
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp b/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp
index 0527073da85b69..9fe7b03a62a6ba 100644
--- a/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/ModuleToBinary.cpp
@@ -48,10 +48,10 @@ void GpuModuleToBinaryPass::getDependentDialects(
// Register all GPU related translations.
registry.insert<gpu::GPUDialect>();
registry.insert<LLVM::LLVMDialect>();
-#if MLIR_CUDA_CONVERSIONS_ENABLED == 1
+#ifdef MLIR_CUDA_CONVERSIONS_ENABLED
registry.insert<NVVM::NVVMDialect>();
#endif
-#if MLIR_ROCM_CONVERSIONS_ENABLED == 1
+#ifdef MLIR_ROCM_CONVERSIONS_ENABLED
registry.insert<ROCDL::ROCDLDialect>();
#endif
registry.insert<spirv::SPIRVDialect>();
diff --git a/mlir/lib/Target/LLVM/NVVM/Target.cpp b/mlir/lib/Target/LLVM/NVVM/Target.cpp
index 71b15a92782ed7..66efc9f416f21f 100644
--- a/mlir/lib/Target/LLVM/NVVM/Target.cpp
+++ b/mlir/lib/Target/LLVM/NVVM/Target.cpp
@@ -156,7 +156,7 @@ SerializeGPUModuleBase::loadBitcodeFiles(llvm::Module &module) {
return std::move(bcFiles);
}
-#if MLIR_CUDA_CONVERSIONS_ENABLED == 1
+#ifdef MLIR_CUDA_CONVERSIONS_ENABLED
namespace {
class NVPTXSerializer : public SerializeGPUModuleBase {
public:
@@ -555,14 +555,14 @@ NVPTXSerializer::moduleToObject(llvm::Module &llvmModule) {
return SmallVector<char, 0>(bin.begin(), bin.end());
}
- // Compile to binary.
+ // Compile to binary.
#if MLIR_NVPTXCOMPILER_ENABLED == 1
return compileToBinaryNVPTX(*serializedISA);
#else
return compileToBinary(*serializedISA);
#endif // MLIR_NVPTXCOMPILER_ENABLED == 1
}
-#endif // MLIR_CUDA_CONVERSIONS_ENABLED == 1
+#endif // MLIR_CUDA_CONVERSIONS_ENABLED
std::optional<SmallVector<char, 0>>
NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
@@ -574,7 +574,7 @@ NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
module->emitError("Module must be a GPU module.");
return std::nullopt;
}
-#if MLIR_CUDA_CONVERSIONS_ENABLED == 1
+#ifdef MLIR_CUDA_CONVERSIONS_ENABLED
NVPTXSerializer serializer(*module, cast<NVVMTargetAttr>(attribute), options);
serializer.init();
return serializer.run();
@@ -582,7 +582,7 @@ NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
module->emitError(
"The `NVPTX` target was not built. Please enable it when building LLVM.");
return std::nullopt;
-#endif // MLIR_CUDA_CONVERSIONS_ENABLED == 1
+#endif // MLIR_CUDA_CONVERSIONS_ENABLED
}
Attribute
diff --git a/mlir/lib/Target/LLVM/ROCDL/Target.cpp b/mlir/lib/Target/LLVM/ROCDL/Target.cpp
index cdcef1d6b459c6..9e608843998103 100644
--- a/mlir/lib/Target/LLVM/ROCDL/Target.cpp
+++ b/mlir/lib/Target/LLVM/ROCDL/Target.cpp
@@ -120,7 +120,7 @@ void SerializeGPUModuleBase::init() {
static llvm::once_flag initializeBackendOnce;
llvm::call_once(initializeBackendOnce, []() {
// If the `AMDGPU` LLVM target was built, initialize it.
-#if MLIR_ROCM_CONVERSIONS_ENABLED == 1
+#ifdef MLIR_ROCM_CONVERSIONS_ENABLED
LLVMInitializeAMDGPUTarget();
LLVMInitializeAMDGPUTargetInfo();
LLVMInitializeAMDGPUTargetMC();
@@ -318,7 +318,7 @@ SerializeGPUModuleBase::assembleIsa(StringRef isa) {
return result;
}
-#if MLIR_ROCM_CONVERSIONS_ENABLED == 1
+#ifdef MLIR_ROCM_CONVERSIONS_ENABLED
namespace {
class AMDGPUSerializer : public SerializeGPUModuleBase {
public:
@@ -462,7 +462,7 @@ std::optional<SmallVector<char, 0>> ROCDLTargetAttrImpl::serializeToObject(
module->emitError("Module must be a GPU module.");
return std::nullopt;
}
-#if MLIR_ROCM_CONVERSIONS_ENABLED == 1
+#ifdef MLIR_ROCM_CONVERSIONS_ENABLED
AMDGPUSerializer serializer(*module, cast<ROCDLTargetAttr>(attribute),
options);
serializer.init();
@@ -471,7 +471,7 @@ std::optional<SmallVector<char, 0>> ROCDLTargetAttrImpl::serializeToObject(
module->emitError("The `AMDGPU` target was not built. Please enable it when "
"building LLVM.");
return std::nullopt;
-#endif // MLIR_ROCM_CONVERSIONS_ENABLED == 1
+#endif // MLIR_ROCM_CONVERSIONS_ENABLED
}
Attribute
diff --git a/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp b/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
index 26bfbd5c11e81e..0cbc938e189b68 100644
--- a/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
+++ b/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
@@ -29,7 +29,7 @@
using namespace mlir;
// Skip the test if the NVPTX target was not built.
-#if MLIR_CUDA_CONVERSIONS_ENABLED == 0
+#ifndef MLIR_CUDA_CONVERSIONS_ENABLED
#define SKIP_WITHOUT_NVPTX(x) DISABLED_##x
#else
#define SKIP_WITHOUT_NVPTX(x) x
diff --git a/mlir/unittests/Target/LLVM/SerializeROCDLTarget.cpp b/mlir/unittests/Target/LLVM/SerializeROCDLTarget.cpp
index e91e69b102043a..5b3fe4979a2d01 100644
--- a/mlir/unittests/Target/LLVM/SerializeROCDLTarget.cpp
+++ b/mlir/unittests/Target/LLVM/SerializeROCDLTarget.cpp
@@ -31,7 +31,7 @@
using namespace mlir;
// Skip the test if the AMDGPU target was not built.
-#if MLIR_ROCM_CONVERSIONS_ENABLED == 0
+#ifndef MLIR_ROCM_CONVERSIONS_ENABLED
#define SKIP_WITHOUT_AMDGPU(x) DISABLED_##x
#else
#define SKIP_WITHOUT_AMDGPU(x) x