[Openmp-commits] [openmp] a5153db - [OpenMP][Offloading] Added support for multiple streams so that multiple kernels can be executed concurrently

Johannes Doerfert via Openmp-commits openmp-commits at lists.llvm.org
Tue Feb 11 20:08:06 PST 2020


Author: Johannes Doerfert
Date: 2020-02-11T22:07:14-06:00
New Revision: a5153dbc368e1a484e179fafce7a260410d20569

URL: https://github.com/llvm/llvm-project/commit/a5153dbc368e1a484e179fafce7a260410d20569
DIFF: https://github.com/llvm/llvm-project/commit/a5153dbc368e1a484e179fafce7a260410d20569.diff

LOG: [OpenMP][Offloading] Added support for multiple streams so that multiple kernels can be executed concurrently

Reviewed By: jdoerfert

Differential Revision: https://reviews.llvm.org/D74145

Added: 
    openmp/libomptarget/test/offloading/parallel_offloading_map.c

Modified: 
    openmp/libomptarget/plugins/cuda/src/rtl.cpp

Removed: 
    


################################################################################
diff --git a/openmp/libomptarget/plugins/cuda/src/rtl.cpp b/openmp/libomptarget/plugins/cuda/src/rtl.cpp
index 49acb6cacbfe..54248daa7f19 100644
--- a/openmp/libomptarget/plugins/cuda/src/rtl.cpp
+++ b/openmp/libomptarget/plugins/cuda/src/rtl.cpp
@@ -10,10 +10,12 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include <atomic>
 #include <cassert>
 #include <cstddef>
 #include <cuda.h>
 #include <list>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -90,11 +92,13 @@ std::list<KernelTy> KernelsList;
 /// Class containing all the device information.
 class RTLDeviceInfoTy {
   std::vector<std::list<FuncOrGblEntryTy>> FuncGblEntries;
+  std::vector<std::unique_ptr<std::atomic_uint>> NextStreamId;
 
 public:
   int NumberOfDevices;
   std::vector<CUmodule> Modules;
   std::vector<CUcontext> Contexts;
+  std::vector<std::vector<CUstream>> Streams;
 
   // Device properties
   std::vector<int> ThreadsPerBlock;
@@ -108,6 +112,7 @@ class RTLDeviceInfoTy {
   // OpenMP Environment properties
   int EnvNumTeams;
   int EnvTeamLimit;
+  int EnvNumStreams;
 
   // OpenMP Requires Flags
   int64_t RequiresFlags;
@@ -173,6 +178,15 @@ class RTLDeviceInfoTy {
     E.Table.EntriesBegin = E.Table.EntriesEnd = 0;
   }
 
+  // Get the next stream on a given device in a round robin manner
+  CUstream &getNextStream(const int DeviceId) {
+    assert(DeviceId >= 0 &&
+           static_cast<size_t>(DeviceId) < NextStreamId.size() &&
+           "Unexpected device id!");
+    const unsigned int Id = NextStreamId[DeviceId]->fetch_add(1);
+    return Streams[DeviceId][Id % EnvNumStreams];
+  }
+
   RTLDeviceInfoTy() {
 #ifdef OMPTARGET_DEBUG
     if (char *envStr = getenv("LIBOMPTARGET_DEBUG")) {
@@ -205,6 +219,8 @@ class RTLDeviceInfoTy {
 
     FuncGblEntries.resize(NumberOfDevices);
     Contexts.resize(NumberOfDevices);
+    Streams.resize(NumberOfDevices);
+    NextStreamId.resize(NumberOfDevices);
     ThreadsPerBlock.resize(NumberOfDevices);
     BlocksPerGrid.resize(NumberOfDevices);
     WarpSize.resize(NumberOfDevices);
@@ -229,6 +245,23 @@ class RTLDeviceInfoTy {
       EnvNumTeams = -1;
     }
 
+    // By default let's create 256 streams per device
+    EnvNumStreams = 256;
+    envStr = getenv("LIBOMPTARGET_NUM_STREAMS");
+    if (envStr) {
+      EnvNumStreams = std::stoi(envStr);
+    }
+
+    // Initialize streams for each device
+    for (std::vector<CUstream> &S : Streams) {
+      S.resize(EnvNumStreams);
+    }
+
+    // Initialize the next stream id
+    for (std::unique_ptr<std::atomic_uint> &Ptr : NextStreamId) {
+      Ptr = std::make_unique<std::atomic_uint>(0);
+    }
+
     // Default state.
     RequiresFlags = OMP_REQ_UNDEFINED;
   }
@@ -244,6 +277,24 @@ class RTLDeviceInfoTy {
         }
       }
 
+    // Destroy streams before contexts
+    for (int I = 0; I < NumberOfDevices; ++I) {
+      CUresult err = cuCtxSetCurrent(Contexts[I]);
+      if (err != CUDA_SUCCESS) {
+        DP("Error when setting current CUDA context\n");
+        CUDA_ERR_STRING(err);
+      }
+
+      for (auto &S : Streams[I])
+        if (S) {
+          err = cuStreamDestroy(S);
+          if (err != CUDA_SUCCESS) {
+            DP("Error when destroying CUDA stream\n");
+            CUDA_ERR_STRING(err);
+          }
+        }
+    }
+
     // Destroy contexts
     for (auto &ctx : Contexts)
       if (ctx) {
@@ -294,6 +345,20 @@ int32_t __tgt_rtl_init_device(int32_t device_id) {
     return OFFLOAD_FAIL;
   }
 
+  err = cuCtxSetCurrent(DeviceInfo.Contexts[device_id]);
+  if (err != CUDA_SUCCESS) {
+    DP("Error when setting current CUDA context\n");
+    CUDA_ERR_STRING(err);
+  }
+
+  for (CUstream &Stream : DeviceInfo.Streams[device_id]) {
+    err = cuStreamCreate(&Stream, CU_STREAM_NON_BLOCKING);
+    if (err != CUDA_SUCCESS) {
+      DP("Error when creating CUDA stream\n");
+      CUDA_ERR_STRING(err);
+    }
+  }
+
   // Query attributes to determine number of threads/block and blocks/grid.
   int maxGridDimX;
   err = cuDeviceGetAttribute(&maxGridDimX, CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X,
@@ -607,14 +672,26 @@ int32_t __tgt_rtl_data_submit(int32_t device_id, void *tgt_ptr, void *hst_ptr,
     return OFFLOAD_FAIL;
   }
 
-  err = cuMemcpyHtoD((CUdeviceptr)tgt_ptr, hst_ptr, size);
+  CUstream &Stream = DeviceInfo.getNextStream(device_id);
+
+  err = cuMemcpyHtoDAsync((CUdeviceptr)tgt_ptr, hst_ptr, size, Stream);
   if (err != CUDA_SUCCESS) {
     DP("Error when copying data from host to device. Pointers: host = " DPxMOD
-       ", device = " DPxMOD ", size = %" PRId64 "\n", DPxPTR(hst_ptr),
-       DPxPTR(tgt_ptr), size);
+       ", device = " DPxMOD ", size = %" PRId64 "\n",
+       DPxPTR(hst_ptr), DPxPTR(tgt_ptr), size);
+    CUDA_ERR_STRING(err);
+    return OFFLOAD_FAIL;
+  }
+
+  err = cuStreamSynchronize(Stream);
+  if (err != CUDA_SUCCESS) {
+    DP("Error when synchronizing async data transfer from host to device. "
+       "Pointers: host = " DPxMOD ", device = " DPxMOD ", size = %" PRId64 "\n",
+       DPxPTR(hst_ptr), DPxPTR(tgt_ptr), size);
     CUDA_ERR_STRING(err);
     return OFFLOAD_FAIL;
   }
+
   return OFFLOAD_SUCCESS;
 }
 
@@ -628,14 +705,26 @@ int32_t __tgt_rtl_data_retrieve(int32_t device_id, void *hst_ptr, void *tgt_ptr,
     return OFFLOAD_FAIL;
   }
 
-  err = cuMemcpyDtoH(hst_ptr, (CUdeviceptr)tgt_ptr, size);
+  CUstream &Stream = DeviceInfo.getNextStream(device_id);
+
+  err = cuMemcpyDtoHAsync(hst_ptr, (CUdeviceptr)tgt_ptr, size, Stream);
   if (err != CUDA_SUCCESS) {
     DP("Error when copying data from device to host. Pointers: host = " DPxMOD
-        ", device = " DPxMOD ", size = %" PRId64 "\n", DPxPTR(hst_ptr),
-        DPxPTR(tgt_ptr), size);
+       ", device = " DPxMOD ", size = %" PRId64 "\n",
+       DPxPTR(hst_ptr), DPxPTR(tgt_ptr), size);
+    CUDA_ERR_STRING(err);
+    return OFFLOAD_FAIL;
+  }
+
+  err = cuStreamSynchronize(Stream);
+  if (err != CUDA_SUCCESS) {
+    DP("Error when synchronizing async data transfer from device to host. "
+       "Pointers: host = " DPxMOD ", device = " DPxMOD ", size = %" PRId64 "\n",
+       DPxPTR(hst_ptr), DPxPTR(tgt_ptr), size);
     CUDA_ERR_STRING(err);
     return OFFLOAD_FAIL;
   }
+
   return OFFLOAD_SUCCESS;
 }
 
@@ -755,8 +844,11 @@ int32_t __tgt_rtl_run_target_team_region(int32_t device_id, void *tgt_entry_ptr,
   DP("Launch kernel with %d blocks and %d threads\n", cudaBlocksPerGrid,
      cudaThreadsPerBlock);
 
+  CUstream &Stream = DeviceInfo.getNextStream(device_id);
+
   err = cuLaunchKernel(KernelInfo->Func, cudaBlocksPerGrid, 1, 1,
-      cudaThreadsPerBlock, 1, 1, 0 /*bytes of shared memory*/, 0, &args[0], 0);
+                       cudaThreadsPerBlock, 1, 1, 0 /*bytes of shared memory*/,
+                       Stream, &args[0], 0);
   if (err != CUDA_SUCCESS) {
     DP("Device kernel launch failed!\n");
     CUDA_ERR_STRING(err);
@@ -766,7 +858,7 @@ int32_t __tgt_rtl_run_target_team_region(int32_t device_id, void *tgt_entry_ptr,
   DP("Launch of entry point at " DPxMOD " successful!\n",
       DPxPTR(tgt_entry_ptr));
 
-  CUresult sync_err = cuCtxSynchronize();
+  CUresult sync_err = cuStreamSynchronize(Stream);
   if (sync_err != CUDA_SUCCESS) {
     DP("Kernel execution error at " DPxMOD "!\n", DPxPTR(tgt_entry_ptr));
     CUDA_ERR_STRING(sync_err);

diff --git a/openmp/libomptarget/test/offloading/parallel_offloading_map.c b/openmp/libomptarget/test/offloading/parallel_offloading_map.c
new file mode 100644
index 000000000000..bcdc6f96f4d0
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/parallel_offloading_map.c
@@ -0,0 +1,41 @@
+// RUN: %libomptarget-compilexx-run-and-check-aarch64-unknown-linux-gnu
+// RUN: %libomptarget-compilexx-run-and-check-powerpc64-ibm-linux-gnu
+// RUN: %libomptarget-compilexx-run-and-check-powerpc64le-ibm-linux-gnu
+// RUN: %libomptarget-compilexx-run-and-check-x86_64-pc-linux-gnu
+#include <assert.h>
+#include <stdio.h>
+
+int main(int argc, char *argv[]) {
+  const int num_threads = 64, N = 128;
+  int array[num_threads] = {0};
+
+#pragma omp parallel for
+  for (int i = 0; i < num_threads; ++i) {
+    int tmp[N];
+
+    for (int j = 0; j < N; ++j) {
+      tmp[j] = i;
+    }
+
+#pragma omp target teams distribute parallel for map(tofrom : tmp)
+    for (int j = 0; j < N; ++j) {
+      tmp[j] += j;
+    }
+
+    for (int j = 0; j < N; ++j) {
+      array[i] += tmp[j];
+    }
+  }
+
+  // Verify
+  for (int i = 0; i < num_threads; ++i) {
+    const int ref = (0 + N - 1) * N / 2 + i * N;
+    assert(array[i] == ref);
+  }
+
+  printf("PASS\n");
+
+  return 0;
+}
+
+// CHECK: PASS


        


More information about the Openmp-commits mailing list