[PATCH] D106401: [CUDA, MemCpyOpt] Add a flag to force-enable memcpyopt and use it for CUDA.

Artem Belevich via Phabricator via cfe-commits <cfe-commits at lists.llvm.org>
Fri Aug 6 11:22:25 PDT 2021


This revision was landed with ongoing or failed builds.
This revision was automatically updated to reflect the committed changes.
Closed by commit rG6a9cf21f5a2d: [CUDA, MemCpyOpt] Add a flag to force-enable memcpyopt and use it for CUDA. (authored by tra).
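
For context: MemCpyOpt normally refuses to introduce memcpy/memmove/memset
intrinsics when TargetLibraryInfo reports that the corresponding libcalls are
unavailable, which is the case for GPU targets such as NVPTX and AMDGPU. The
new hidden flag -enable-memcpyopt-without-libcalls overrides that check, and
the CUDA driver now passes it (via -mllvm) for device-side compilations. Below
is a minimal sketch of the kind of IR this affects, loosely modeled on the
no-libcalls.ll test in the diff; the struct type and function name are
illustrative and not taken from the patch:

  %struct.S = type { i32, i32, i32, i32 }

  define void @copy(%struct.S* %dst, %struct.S* %src) {
    ; An aggregate load/store pair. With -enable-memcpyopt-without-libcalls,
    ; MemCpyOpt may rewrite this into a call to @llvm.memcpy even when the
    ; target reports no memcpy libcall.
    %val = load %struct.S, %struct.S* %src
    store %struct.S %val, %struct.S* %dst
    ret void
  }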

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D106401/new/

https://reviews.llvm.org/D106401

Files:
  clang/lib/Driver/ToolChains/Cuda.cpp
  llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
  llvm/test/Transforms/MemCpyOpt/no-libcalls.ll


Index: llvm/test/Transforms/MemCpyOpt/no-libcalls.ll
===================================================================
--- llvm/test/Transforms/MemCpyOpt/no-libcalls.ll
+++ llvm/test/Transforms/MemCpyOpt/no-libcalls.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -memcpyopt < %s | FileCheck %s --check-prefixes=CHECK,LIBCALLS
 ; RUN: opt -S -memcpyopt -mtriple=amdgcn-- < %s | FileCheck %s --check-prefixes=CHECK,NO-LIBCALLS
+; RUN: opt -S -memcpyopt -mtriple=amdgcn-- -enable-memcpyopt-without-libcalls < %s \
+; RUN:     | FileCheck %s --check-prefixes=CHECK,LIBCALLS
 
 ; REQUIRES: amdgpu-registered-target
 
Index: llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -67,6 +67,10 @@
 
 #define DEBUG_TYPE "memcpyopt"
 
+static cl::opt<bool> EnableMemCpyOptWithoutLibcalls(
+    "enable-memcpyopt-without-libcalls", cl::init(false), cl::Hidden,
+    cl::desc("Enable memcpyopt even when libcalls are disabled"));
+
 static cl::opt<bool>
     EnableMemorySSA("enable-memcpyopt-memoryssa", cl::init(true), cl::Hidden,
                     cl::desc("Use MemorySSA-backed MemCpyOpt."));
@@ -677,8 +681,9 @@
       // the corresponding libcalls are not available.
       // TODO: We should really distinguish between libcall availability and
       // our ability to introduce intrinsics.
-      if (T->isAggregateType() && TLI->has(LibFunc_memcpy) &&
-          TLI->has(LibFunc_memmove)) {
+      if (T->isAggregateType() &&
+          (EnableMemCpyOptWithoutLibcalls ||
+           (TLI->has(LibFunc_memcpy) && TLI->has(LibFunc_memmove)))) {
         MemoryLocation LoadLoc = MemoryLocation::get(LI);
 
         // We use alias analysis to check if an instruction may store to
@@ -806,7 +811,7 @@
   // this if the corresponding libfunc is not available.
   // TODO: We should really distinguish between libcall availability and
   // our ability to introduce intrinsics.
-  if (!TLI->has(LibFunc_memset))
+  if (!(TLI->has(LibFunc_memset) || EnableMemCpyOptWithoutLibcalls))
     return false;
 
   // There are two cases that are interesting for this code to handle: memcpy
Index: clang/lib/Driver/ToolChains/Cuda.cpp
===================================================================
--- clang/lib/Driver/ToolChains/Cuda.cpp
+++ clang/lib/Driver/ToolChains/Cuda.cpp
@@ -685,7 +685,8 @@
          "Only OpenMP or CUDA offloading kinds are supported for NVIDIA GPUs.");
 
   if (DeviceOffloadingKind == Action::OFK_Cuda) {
-    CC1Args.push_back("-fcuda-is-device");
+    CC1Args.append(
+        {"-fcuda-is-device", "-mllvm", "-enable-memcpyopt-without-libcalls"});
 
     if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
                            options::OPT_fno_cuda_approx_transcendentals, false))
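
With the driver change above, CUDA device compilations (-fcuda-is-device) get
-mllvm -enable-memcpyopt-without-libcalls added automatically. As a rough
sketch of the expected outcome for the aggregate-copy example shown earlier
(names, alignment, and the attribute list are illustrative; the typed-pointer
syntax matches LLVM at the time of this patch), MemCpyOpt can now produce:

  %struct.S = type { i32, i32, i32, i32 }

  define void @copy(%struct.S* %dst, %struct.S* %src) {
    %dst.i8 = bitcast %struct.S* %dst to i8*
    %src.i8 = bitcast %struct.S* %src to i8*
    ; The 16-byte aggregate copy is now a memcpy intrinsic, which the target
    ; can lower without calling into a C library memcpy.
    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dst.i8, i8* align 4 %src.i8, i64 16, i1 false)
    ret void
  }

  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)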



