[llvm] [flang][cuda] Remove error check from allocation and free call (PR #165022)

Valentin Clement バレンタイン クレメン via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 24 10:46:52 PDT 2025


https://github.com/clementval created https://github.com/llvm/llvm-project/pull/165022

As in https://github.com/llvm/llvm-project/pull/164463, do not perform error checking in the runtime itself; instead, let errors propagate, since users may want to catch them for error recovery.
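For context, a minimal caller-side sketch of the recovery pattern this enables: the allocator returns whatever the CUDA call produced, and the caller queries the CUDA error state itself. The wrapper name tryDeviceAlloc and the bare prototype are illustrative only; they are not part of this patch, nor of what flang actually generates.

// Hypothetical caller-side recovery, e.g. to back ALLOCATE(..., STAT=).
// The CUFAllocDevice prototype is copied from the patched file; the
// surrounding namespace/linkage details are omitted for brevity.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

void *CUFAllocDevice(std::size_t sizeInBytes, std::int64_t *asyncObject);

int tryDeviceAlloc(std::size_t bytes, void **out) {
  *out = CUFAllocDevice(bytes, /*asyncObject=*/nullptr);
  // The allocator no longer reports failures, so the caller inspects the
  // sticky CUDA error state and decides how to recover.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    std::fprintf(stderr, "device allocation failed: %s\n",
                 cudaGetErrorString(err));
    *out = nullptr;
    return static_cast<int>(err); // e.g. surfaced to the user as a STAT= value
  }
  return 0;
}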

From 0f1b3357c3dc695e586292bc9a59617f1e2de3ee Mon Sep 17 00:00:00 2001
From: Valentin Clement <clementval at gmail.com>
Date: Fri, 24 Oct 2025 10:45:21 -0700
Subject: [PATCH] [flang][cuda] Remove error check from allocation and free
 call

---
 flang-rt/lib/cuda/allocator.cpp | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/flang-rt/lib/cuda/allocator.cpp b/flang-rt/lib/cuda/allocator.cpp
index 05d97a5db0451..5436051002265 100644
--- a/flang-rt/lib/cuda/allocator.cpp
+++ b/flang-rt/lib/cuda/allocator.cpp
@@ -138,23 +138,21 @@ void RTDEF(CUFRegisterAllocator)() {
 void *CUFAllocPinned(
     std::size_t sizeInBytes, [[maybe_unused]] std::int64_t *asyncObject) {
   void *p;
-  CUDA_REPORT_IF_ERROR(cudaMallocHost((void **)&p, sizeInBytes));
+  cudaMallocHost((void **)&p, sizeInBytes);
   return p;
 }
 
-void CUFFreePinned(void *p) { CUDA_REPORT_IF_ERROR(cudaFreeHost(p)); }
+void CUFFreePinned(void *p) { cudaFreeHost(p); }
 
 void *CUFAllocDevice(std::size_t sizeInBytes, std::int64_t *asyncObject) {
   void *p;
   if (Fortran::runtime::executionEnvironment.cudaDeviceIsManaged) {
-    CUDA_REPORT_IF_ERROR(
-        cudaMallocManaged((void **)&p, sizeInBytes, cudaMemAttachGlobal));
+    cudaMallocManaged((void **)&p, sizeInBytes, cudaMemAttachGlobal);
   } else {
     if (asyncObject == nullptr) {
-      CUDA_REPORT_IF_ERROR(cudaMalloc(&p, sizeInBytes));
+      cudaMalloc(&p, sizeInBytes);
     } else {
-      CUDA_REPORT_IF_ERROR(
-          cudaMallocAsync(&p, sizeInBytes, (cudaStream_t)*asyncObject));
+      cudaMallocAsync(&p, sizeInBytes, (cudaStream_t)*asyncObject);
       insertAllocation(p, sizeInBytes, (cudaStream_t)*asyncObject);
     }
   }
@@ -167,21 +165,20 @@ void CUFFreeDevice(void *p) {
   if (pos >= 0) {
     cudaStream_t stream = deviceAllocations[pos].stream;
     eraseAllocation(pos);
-    CUDA_REPORT_IF_ERROR(cudaFreeAsync(p, stream));
+    cudaFreeAsync(p, stream);
   } else {
-    CUDA_REPORT_IF_ERROR(cudaFree(p));
+    cudaFree(p);
   }
 }
 
 void *CUFAllocManaged(
     std::size_t sizeInBytes, [[maybe_unused]] std::int64_t *asyncObject) {
   void *p;
-  CUDA_REPORT_IF_ERROR(
-      cudaMallocManaged((void **)&p, sizeInBytes, cudaMemAttachGlobal));
+  cudaMallocManaged((void **)&p, sizeInBytes, cudaMemAttachGlobal);
   return reinterpret_cast<void *>(p);
 }
 
-void CUFFreeManaged(void *p) { CUDA_REPORT_IF_ERROR(cudaFree(p)); }
+void CUFFreeManaged(void *p) { cudaFree(p); }
 
 void *CUFAllocUnified(
     std::size_t sizeInBytes, [[maybe_unused]] std::int64_t *asyncObject) {


