[llvm] [clang] [CUDA][HIP] Improve variable registration with the new driver (PR #73177)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 22 14:01:13 PST 2023
llvmbot wrote:
@llvm/pr-subscribers-flang-openmp
@llvm/pr-subscribers-clang-codegen
@llvm/pr-subscribers-clang
Author: Joseph Huber (jhuber6)
Summary:
This patch adds support for registering texture and surface variables from
CUDA and HIP with the new driver. Additionally, we now properly track the
`extern` and `const` flags that are also passed to these runtime registration
functions.
This does not yet implement `managed` variables, as those seem to require
some extra handling I'm not familiar with. The issue is that the current
offload entry isn't large enough to carry size and alignment
information along with an extra global.
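
As background, here is a minimal sketch (not code from this patch) of how the
entry kind and the new `extern`/`const` bits can be packed into and decoded
from the 32-bit flags word of an offload entry. The enum values mirror the
`OffloadEntryKindFlag` additions in the diff below; `makeFlags` and the
decoding in `main` are illustrative assumptions, and the normalized-surface
bit is omitted here.

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative values mirroring the OffloadEntryKindFlag additions below.
enum OffloadEntryKindFlag : uint32_t {
  OffloadGlobalEntry = 0x0,        // kernel (size == 0) or plain variable
  OffloadGlobalManagedEntry = 0x1, // managed variable
  OffloadGlobalSurfaceEntry = 0x2, // surface variable
  OffloadGlobalTextureEntry = 0x3, // texture variable
  OffloadGlobalExtern = 0x4,       // declared 'extern'
  OffloadGlobalConstant = 0x8,     // declared 'const' / __constant__
};

// Hypothetical helper: combine the entry kind with the extern/const bits,
// much like the new createOffloadingEntries() code does.
uint32_t makeFlags(uint32_t Kind, bool IsExtern, bool IsConstant) {
  return Kind | (IsExtern ? OffloadGlobalExtern : 0u) |
         (IsConstant ? OffloadGlobalConstant : 0u);
}

int main() {
  uint32_t Flags = makeFlags(OffloadGlobalSurfaceEntry, /*IsExtern=*/false,
                             /*IsConstant=*/true);
  // The registration side recovers the pieces with masks and shifts,
  // similar to the IR emitted by the linker wrapper.
  uint32_t Kind = Flags & 0x3;
  uint32_t Extern = (Flags & OffloadGlobalExtern) >> 2;
  uint32_t Constant = (Flags & OffloadGlobalConstant) >> 3;
  std::printf("kind=%u extern=%u constant=%u\n", Kind, Extern, Constant);
}
```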
---
Patch is 29.89 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/73177.diff
8 Files Affected:
- (modified) clang/lib/CodeGen/CGCUDANV.cpp (+19-6)
- (modified) clang/lib/CodeGen/CGCUDARuntime.h (+7-1)
- (modified) clang/test/CodeGenCUDA/offloading-entries.cu (+60-26)
- (modified) clang/test/Driver/linker-wrapper-image.c (+48-26)
- (modified) clang/tools/clang-linker-wrapper/OffloadWrapper.cpp (+53-5)
- (modified) llvm/include/llvm/Frontend/Offloading/Utility.h (+4-2)
- (modified) llvm/lib/Frontend/Offloading/Utility.cpp (+2-2)
- (modified) llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp (+1-1)
``````````diff
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index 66147f656071f53..eb059080b977872 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -1132,26 +1132,39 @@ void CGNVCUDARuntime::createOffloadingEntries() {
for (KernelInfo &I : EmittedKernels)
llvm::offloading::emitOffloadingEntry(
M, KernelHandles[I.Kernel->getName()],
- getDeviceSideName(cast<NamedDecl>(I.D)), 0,
+ getDeviceSideName(cast<NamedDecl>(I.D)), /*Flags=*/0, /*Data=*/0,
DeviceVarFlags::OffloadGlobalEntry, Section);
for (VarInfo &I : DeviceVars) {
uint64_t VarSize =
CGM.getDataLayout().getTypeAllocSize(I.Var->getValueType());
+ int32_t Flags =
+ (I.Flags.isExtern()
+ ? static_cast<int32_t>(DeviceVarFlags::OffloadGlobalExtern)
+ : 0) |
+ (I.Flags.isConstant()
+ ? static_cast<int32_t>(DeviceVarFlags::OffloadGlobalConstant)
+ : 0) |
+ (I.Flags.isNormalized()
+ ? static_cast<int32_t>(DeviceVarFlags::OffloadGlobalNormalized)
+ : 0);
if (I.Flags.getKind() == DeviceVarFlags::Variable) {
llvm::offloading::emitOffloadingEntry(
M, I.Var, getDeviceSideName(I.D), VarSize,
- I.Flags.isManaged() ? DeviceVarFlags::OffloadGlobalManagedEntry
- : DeviceVarFlags::OffloadGlobalEntry,
- Section);
+ (I.Flags.isManaged() ? DeviceVarFlags::OffloadGlobalManagedEntry
+ : DeviceVarFlags::OffloadGlobalEntry) |
+ Flags,
+ /*Data=*/0, Section);
} else if (I.Flags.getKind() == DeviceVarFlags::Surface) {
llvm::offloading::emitOffloadingEntry(
M, I.Var, getDeviceSideName(I.D), VarSize,
- DeviceVarFlags::OffloadGlobalSurfaceEntry, Section);
+ DeviceVarFlags::OffloadGlobalSurfaceEntry | Flags,
+ I.Flags.getSurfTexType(), Section);
} else if (I.Flags.getKind() == DeviceVarFlags::Texture) {
llvm::offloading::emitOffloadingEntry(
M, I.Var, getDeviceSideName(I.D), VarSize,
- DeviceVarFlags::OffloadGlobalTextureEntry, Section);
+ DeviceVarFlags::OffloadGlobalTextureEntry | Flags,
+ I.Flags.getSurfTexType(), Section);
}
}
}
diff --git a/clang/lib/CodeGen/CGCUDARuntime.h b/clang/lib/CodeGen/CGCUDARuntime.h
index 9a9c6d26cc63c40..a224cdf0054f952 100644
--- a/clang/lib/CodeGen/CGCUDARuntime.h
+++ b/clang/lib/CodeGen/CGCUDARuntime.h
@@ -52,7 +52,7 @@ class CGCUDARuntime {
Texture, // Builtin texture
};
- /// The kind flag for an offloading entry.
+ /// The kind bit-field for an offloading entry.
enum OffloadEntryKindFlag : uint32_t {
/// Mark the entry as a global entry. This indicates the presense of a
/// kernel if the size field is zero and a variable otherwise.
@@ -63,6 +63,12 @@ class CGCUDARuntime {
OffloadGlobalSurfaceEntry = 0x2,
/// Mark the entry as a texture variable.
OffloadGlobalTextureEntry = 0x3,
+ /// Mark the entry as being extern.
+ OffloadGlobalExtern = 0x4,
+ /// Mark the entry as being constant.
+ OffloadGlobalConstant = 0x8,
+ /// Mark the entry as being a normalized surface.
+ OffloadGlobalNormalized = 0x16,
};
private:
diff --git a/clang/test/CodeGenCUDA/offloading-entries.cu b/clang/test/CodeGenCUDA/offloading-entries.cu
index 46235051f1e4f12..4f5cf65ecd0bde6 100644
--- a/clang/test/CodeGenCUDA/offloading-entries.cu
+++ b/clang/test/CodeGenCUDA/offloading-entries.cu
@@ -17,31 +17,47 @@
//.
// CUDA: @.omp_offloading.entry_name = internal unnamed_addr constant [8 x i8] c"_Z3foov\00"
// CUDA: @.omp_offloading.entry._Z3foov = weak constant %struct.__tgt_offload_entry { ptr @_Z18__device_stub__foov, ptr @.omp_offloading.entry_name, i64 0, i32 0, i32 0 }, section "cuda_offloading_entries", align 1
-// CUDA: @.omp_offloading.entry_name.1 = internal unnamed_addr constant [8 x i8] c"_Z3barv\00"
-// CUDA: @.omp_offloading.entry._Z3barv = weak constant %struct.__tgt_offload_entry { ptr @_Z18__device_stub__barv, ptr @.omp_offloading.entry_name.1, i64 0, i32 0, i32 0 }, section "cuda_offloading_entries", align 1
-// CUDA: @.omp_offloading.entry_name.2 = internal unnamed_addr constant [2 x i8] c"x\00"
-// CUDA: @.omp_offloading.entry.x = weak constant %struct.__tgt_offload_entry { ptr @x, ptr @.omp_offloading.entry_name.2, i64 4, i32 0, i32 0 }, section "cuda_offloading_entries", align 1
+// CUDA: @.omp_offloading.entry_name.1 = internal unnamed_addr constant [11 x i8] c"_Z6kernelv\00"
+// CUDA: @.omp_offloading.entry._Z6kernelv = weak constant %struct.__tgt_offload_entry { ptr @_Z21__device_stub__kernelv, ptr @.omp_offloading.entry_name.1, i64 0, i32 0, i32 0 }, section "cuda_offloading_entries", align 1
+// CUDA: @.omp_offloading.entry_name.2 = internal unnamed_addr constant [4 x i8] c"var\00"
+// CUDA: @.omp_offloading.entry.var = weak constant %struct.__tgt_offload_entry { ptr @var, ptr @.omp_offloading.entry_name.2, i64 4, i32 0, i32 0 }, section "cuda_offloading_entries", align 1
+// CUDA: @.omp_offloading.entry_name.3 = internal unnamed_addr constant [5 x i8] c"surf\00"
+// CUDA: @.omp_offloading.entry.surf = weak constant %struct.__tgt_offload_entry { ptr @surf, ptr @.omp_offloading.entry_name.3, i64 4, i32 2, i32 1 }, section "cuda_offloading_entries", align 1
+// CUDA: @.omp_offloading.entry_name.4 = internal unnamed_addr constant [4 x i8] c"tex\00"
+// CUDA: @.omp_offloading.entry.tex = weak constant %struct.__tgt_offload_entry { ptr @tex, ptr @.omp_offloading.entry_name.4, i64 4, i32 3, i32 1 }, section "cuda_offloading_entries", align 1
//.
// HIP: @.omp_offloading.entry_name = internal unnamed_addr constant [8 x i8] c"_Z3foov\00"
// HIP: @.omp_offloading.entry._Z3foov = weak constant %struct.__tgt_offload_entry { ptr @_Z3foov, ptr @.omp_offloading.entry_name, i64 0, i32 0, i32 0 }, section "hip_offloading_entries", align 1
-// HIP: @.omp_offloading.entry_name.1 = internal unnamed_addr constant [8 x i8] c"_Z3barv\00"
-// HIP: @.omp_offloading.entry._Z3barv = weak constant %struct.__tgt_offload_entry { ptr @_Z3barv, ptr @.omp_offloading.entry_name.1, i64 0, i32 0, i32 0 }, section "hip_offloading_entries", align 1
-// HIP: @.omp_offloading.entry_name.2 = internal unnamed_addr constant [2 x i8] c"x\00"
-// HIP: @.omp_offloading.entry.x = weak constant %struct.__tgt_offload_entry { ptr @x, ptr @.omp_offloading.entry_name.2, i64 4, i32 0, i32 0 }, section "hip_offloading_entries", align 1
+// HIP: @.omp_offloading.entry_name.1 = internal unnamed_addr constant [11 x i8] c"_Z6kernelv\00"
+// HIP: @.omp_offloading.entry._Z6kernelv = weak constant %struct.__tgt_offload_entry { ptr @_Z6kernelv, ptr @.omp_offloading.entry_name.1, i64 0, i32 0, i32 0 }, section "hip_offloading_entries", align 1
+// HIP: @.omp_offloading.entry_name.2 = internal unnamed_addr constant [4 x i8] c"var\00"
+// HIP: @.omp_offloading.entry.var = weak constant %struct.__tgt_offload_entry { ptr @var, ptr @.omp_offloading.entry_name.2, i64 4, i32 0, i32 0 }, section "hip_offloading_entries", align 1
+// HIP: @.omp_offloading.entry_name.3 = internal unnamed_addr constant [5 x i8] c"surf\00"
+// HIP: @.omp_offloading.entry.surf = weak constant %struct.__tgt_offload_entry { ptr @surf, ptr @.omp_offloading.entry_name.3, i64 4, i32 2, i32 1 }, section "hip_offloading_entries", align 1
+// HIP: @.omp_offloading.entry_name.4 = internal unnamed_addr constant [4 x i8] c"tex\00"
+// HIP: @.omp_offloading.entry.tex = weak constant %struct.__tgt_offload_entry { ptr @tex, ptr @.omp_offloading.entry_name.4, i64 4, i32 3, i32 1 }, section "hip_offloading_entries", align 1
//.
// CUDA-COFF: @.omp_offloading.entry_name = internal unnamed_addr constant [8 x i8] c"_Z3foov\00"
// CUDA-COFF: @.omp_offloading.entry._Z3foov = weak constant %struct.__tgt_offload_entry { ptr @_Z18__device_stub__foov, ptr @.omp_offloading.entry_name, i64 0, i32 0, i32 0 }, section "cuda_offloading_entries$OE", align 1
-// CUDA-COFF: @.omp_offloading.entry_name.1 = internal unnamed_addr constant [8 x i8] c"_Z3barv\00"
-// CUDA-COFF: @.omp_offloading.entry._Z3barv = weak constant %struct.__tgt_offload_entry { ptr @_Z18__device_stub__barv, ptr @.omp_offloading.entry_name.1, i64 0, i32 0, i32 0 }, section "cuda_offloading_entries$OE", align 1
-// CUDA-COFF: @.omp_offloading.entry_name.2 = internal unnamed_addr constant [2 x i8] c"x\00"
-// CUDA-COFF: @.omp_offloading.entry.x = weak constant %struct.__tgt_offload_entry { ptr @x, ptr @.omp_offloading.entry_name.2, i64 4, i32 0, i32 0 }, section "cuda_offloading_entries$OE", align 1
+// CUDA-COFF: @.omp_offloading.entry_name.1 = internal unnamed_addr constant [11 x i8] c"_Z6kernelv\00"
+// CUDA-COFF: @.omp_offloading.entry._Z6kernelv = weak constant %struct.__tgt_offload_entry { ptr @_Z21__device_stub__kernelv, ptr @.omp_offloading.entry_name.1, i64 0, i32 0, i32 0 }, section "cuda_offloading_entries$OE", align 1
+// CUDA-COFF: @.omp_offloading.entry_name.2 = internal unnamed_addr constant [4 x i8] c"var\00"
+// CUDA-COFF: @.omp_offloading.entry.var = weak constant %struct.__tgt_offload_entry { ptr @var, ptr @.omp_offloading.entry_name.2, i64 4, i32 0, i32 0 }, section "cuda_offloading_entries$OE", align 1
+// CUDA-COFF: @.omp_offloading.entry_name.3 = internal unnamed_addr constant [5 x i8] c"surf\00"
+// CUDA-COFF: @.omp_offloading.entry.surf = weak constant %struct.__tgt_offload_entry { ptr @surf, ptr @.omp_offloading.entry_name.3, i64 4, i32 2, i32 1 }, section "cuda_offloading_entries$OE", align 1
+// CUDA-COFF: @.omp_offloading.entry_name.4 = internal unnamed_addr constant [4 x i8] c"tex\00"
+// CUDA-COFF: @.omp_offloading.entry.tex = weak constant %struct.__tgt_offload_entry { ptr @tex, ptr @.omp_offloading.entry_name.4, i64 4, i32 3, i32 1 }, section "cuda_offloading_entries$OE", align 1
//.
// HIP-COFF: @.omp_offloading.entry_name = internal unnamed_addr constant [8 x i8] c"_Z3foov\00"
// HIP-COFF: @.omp_offloading.entry._Z3foov = weak constant %struct.__tgt_offload_entry { ptr @_Z3foov, ptr @.omp_offloading.entry_name, i64 0, i32 0, i32 0 }, section "hip_offloading_entries$OE", align 1
-// HIP-COFF: @.omp_offloading.entry_name.1 = internal unnamed_addr constant [8 x i8] c"_Z3barv\00"
-// HIP-COFF: @.omp_offloading.entry._Z3barv = weak constant %struct.__tgt_offload_entry { ptr @_Z3barv, ptr @.omp_offloading.entry_name.1, i64 0, i32 0, i32 0 }, section "hip_offloading_entries$OE", align 1
-// HIP-COFF: @.omp_offloading.entry_name.2 = internal unnamed_addr constant [2 x i8] c"x\00"
-// HIP-COFF: @.omp_offloading.entry.x = weak constant %struct.__tgt_offload_entry { ptr @x, ptr @.omp_offloading.entry_name.2, i64 4, i32 0, i32 0 }, section "hip_offloading_entries$OE", align 1
+// HIP-COFF: @.omp_offloading.entry_name.1 = internal unnamed_addr constant [11 x i8] c"_Z6kernelv\00"
+// HIP-COFF: @.omp_offloading.entry._Z6kernelv = weak constant %struct.__tgt_offload_entry { ptr @_Z6kernelv, ptr @.omp_offloading.entry_name.1, i64 0, i32 0, i32 0 }, section "hip_offloading_entries$OE", align 1
+// HIP-COFF: @.omp_offloading.entry_name.2 = internal unnamed_addr constant [4 x i8] c"var\00"
+// HIP-COFF: @.omp_offloading.entry.var = weak constant %struct.__tgt_offload_entry { ptr @var, ptr @.omp_offloading.entry_name.2, i64 4, i32 0, i32 0 }, section "hip_offloading_entries$OE", align 1
+// HIP-COFF: @.omp_offloading.entry_name.3 = internal unnamed_addr constant [5 x i8] c"surf\00"
+// HIP-COFF: @.omp_offloading.entry.surf = weak constant %struct.__tgt_offload_entry { ptr @surf, ptr @.omp_offloading.entry_name.3, i64 4, i32 2, i32 1 }, section "hip_offloading_entries$OE", align 1
+// HIP-COFF: @.omp_offloading.entry_name.4 = internal unnamed_addr constant [4 x i8] c"tex\00"
+// HIP-COFF: @.omp_offloading.entry.tex = weak constant %struct.__tgt_offload_entry { ptr @tex, ptr @.omp_offloading.entry_name.4, i64 4, i32 3, i32 1 }, section "hip_offloading_entries$OE", align 1
//.
// CUDA-LABEL: @_Z18__device_stub__foov(
// CUDA-NEXT: entry:
@@ -72,34 +88,52 @@
// HIP-COFF-NEXT: ret void
//
__global__ void foo() {}
+__device__ int var = 1;
+const __device__ int constant = 1;
+extern __device__ int external;
-// CUDA-LABEL: @_Z18__device_stub__barv(
+// CUDA-LABEL: @_Z21__device_stub__kernelv(
// CUDA-NEXT: entry:
-// CUDA-NEXT: [[TMP0:%.*]] = call i32 @cudaLaunch(ptr @_Z18__device_stub__barv)
+// CUDA-NEXT: [[TMP0:%.*]] = call i32 @cudaLaunch(ptr @_Z21__device_stub__kernelv)
// CUDA-NEXT: br label [[SETUP_END:%.*]]
// CUDA: setup.end:
// CUDA-NEXT: ret void
//
-// HIP-LABEL: @_Z18__device_stub__barv(
+// HIP-LABEL: @_Z21__device_stub__kernelv(
// HIP-NEXT: entry:
-// HIP-NEXT: [[TMP0:%.*]] = call i32 @hipLaunchByPtr(ptr @_Z3barv)
+// HIP-NEXT: [[TMP0:%.*]] = call i32 @hipLaunchByPtr(ptr @_Z6kernelv)
// HIP-NEXT: br label [[SETUP_END:%.*]]
// HIP: setup.end:
// HIP-NEXT: ret void
//
-// CUDA-COFF-LABEL: @_Z18__device_stub__barv(
+// CUDA-COFF-LABEL: @_Z21__device_stub__kernelv(
// CUDA-COFF-NEXT: entry:
-// CUDA-COFF-NEXT: [[TMP0:%.*]] = call i32 @cudaLaunch(ptr @_Z18__device_stub__barv)
+// CUDA-COFF-NEXT: [[TMP0:%.*]] = call i32 @cudaLaunch(ptr @_Z21__device_stub__kernelv)
// CUDA-COFF-NEXT: br label [[SETUP_END:%.*]]
// CUDA-COFF: setup.end:
// CUDA-COFF-NEXT: ret void
//
-// HIP-COFF-LABEL: @_Z18__device_stub__barv(
+// HIP-COFF-LABEL: @_Z21__device_stub__kernelv(
// HIP-COFF-NEXT: entry:
-// HIP-COFF-NEXT: [[TMP0:%.*]] = call i32 @hipLaunchByPtr(ptr @_Z3barv)
+// HIP-COFF-NEXT: [[TMP0:%.*]] = call i32 @hipLaunchByPtr(ptr @_Z6kernelv)
// HIP-COFF-NEXT: br label [[SETUP_END:%.*]]
// HIP-COFF: setup.end:
// HIP-COFF-NEXT: ret void
//
-__global__ void bar() {}
-__device__ int x = 1;
+__global__ void kernel() { external = 1; }
+
+struct surfaceReference { int desc; };
+
+template <typename T, int dim = 1>
+struct __attribute__((device_builtin_surface_type)) surface : public surfaceReference {};
+
+surface<void> surf;
+
+struct textureReference {
+ int desc;
+};
+
+template <typename T, int dim = 1, int mode = 0>
+struct __attribute__((device_builtin_texture_type)) texture : public textureReference {};
+
+texture<void> tex;
diff --git a/clang/test/Driver/linker-wrapper-image.c b/clang/test/Driver/linker-wrapper-image.c
index 73d3c40810c35a8..b9986f8afffaa9b 100644
--- a/clang/test/Driver/linker-wrapper-image.c
+++ b/clang/test/Driver/linker-wrapper-image.c
@@ -80,24 +80,33 @@
// CUDA-NEXT: br i1 icmp ne (ptr @__start_cuda_offloading_entries, ptr @__stop_cuda_offloading_entries), label %while.entry, label %while.end
// CUDA: while.entry:
-// CUDA-NEXT: %entry1 = phi ptr [ @__start_cuda_offloading_entries, %entry ], [ %7, %if.end ]
-// CUDA-NEXT: %1 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 0
-// CUDA-NEXT: %addr = load ptr, ptr %1, align 8
-// CUDA-NEXT: %2 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 1
-// CUDA-NEXT: %name = load ptr, ptr %2, align 8
-// CUDA-NEXT: %3 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 2
-// CUDA-NEXT: %size = load i64, ptr %3, align 4
-// CUDA-NEXT: %4 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 3
-// CUDA-NEXT: %flag = load i32, ptr %4, align 4
-// CUDA-NEXT: %5 = icmp eq i64 %size, 0
-// CUDA-NEXT: br i1 %5, label %if.then, label %if.else
+// CUDA-NEXT: %entry1 = phi ptr [ @__start_cuda_offloading_entries, %entry ], [ %12, %if.end ]
+// CUDA-NEXT: %1 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 0
+// CUDA-NEXT: %addr = load ptr, ptr %1, align 8
+// CUDA-NEXT: %2 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 1
+// CUDA-NEXT: %name = load ptr, ptr %2, align 8
+// CUDA-NEXT: %3 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 2
+// CUDA-NEXT: %size = load i64, ptr %3, align 4
+// CUDA-NEXT: %4 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 3
+// CUDA-NEXT: %flag = load i32, ptr %4, align 4
+// CUDA-NEXT: %5 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 4
+// CUDA-NEXT: %textype = load i32, ptr %4, align 4
+// CUDA-NEXT: %6 = and i32 %flag, 3
+// CUDA-NEXT: %7 = and i32 %flag, 4
+// CUDA-NEXT: %extern = lshr i32 %7, 2
+// CUDA-NEXT: %8 = and i32 %flag, 8
+// CUDA-NEXT: %constant = lshr i32 %8, 3
+// CUDA-NEXT: %9 = and i32 %flag, 22
+// CUDA-NEXT: %normalized = lshr i32 %9, 4
+// CUDA-NEXT: %10 = icmp eq i64 %size, 0
+// CUDA-NEXT: br i1 %10, label %if.then, label %if.else
// CUDA: if.then:
-// CUDA-NEXT: %6 = call i32 @__cudaRegisterFunction(ptr %0, ptr %addr, ptr %name, ptr %name, i32 -1, ptr null, ptr null, ptr null, ptr null, ptr null)
+// CUDA-NEXT: %11 = call i32 @__cudaRegisterFunction(ptr %0, ptr %addr, ptr %name, ptr %name, i32 -1, ptr null, ptr null, ptr null, ptr null, ptr null)
// CUDA-NEXT: br label %if.end
// CUDA: if.else:
-// CUDA-NEXT: switch i32 %flag, label %if.end [
+// CUDA-NEXT: switch i32 %6, label %if.end [
// CUDA-NEXT: i32 0, label %sw.global
// CUDA-NEXT: i32 1, label %sw.managed
// CUDA-NEXT: i32 2, label %sw.surface
@@ -105,22 +114,24 @@
// CUDA-NEXT: ]
// CUDA: sw.global:
-// CUDA-NEXT: call void @__cudaRegisterVar(ptr %0, ptr %addr, ptr %name, ptr %name, i32 0, i64 %size, i32 0, i32 0)
+// CUDA-NEXT: call void @__cudaRegisterVar(ptr %0, ptr %addr, ptr %name, ptr %name, i32 %extern, i64 %size, i32 %constant, i32 0)
// CUDA-NEXT: br label %if.end
// CUDA: sw.managed:
// CUDA-NEXT: br label %if.end
// CUDA: sw.surface:
+// CUDA-NEXT: call void @__cudaRegisterSurface(ptr %0, ptr %addr, ptr %name, ptr %name, i32 %textype, i32 %extern)
// CUDA-NEXT: br label %if.end
// CUDA: sw.texture:
+// CUDA-NEXT: call void @__cudaRegisterTexture(ptr %0, ptr %addr, ptr %name, ptr %name, i32 %textype, i32 %normalized, i32 %extern)
// CUDA-NEXT: br label %if.end
// CUDA: if.end:
-// CUDA-NEXT: %7 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 1
-// CUDA-NEXT: %8 = icmp eq ptr %7, @__stop_cuda_offloading_entries
-// CUDA-NEXT: br i1 %8, label %while.end, label %while.entry
+// CUDA-NEXT: %12 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 1
+// CUDA-NEXT: %13 = icmp eq ptr %12, @__stop_cuda_offloading_entries
+// CUDA-NEXT: br i1 %13, label %while.end, label %while.entry
// CUDA: while.end:
// CUDA-NEXT: ret void
@@ -168,7 +179,7 @@
// HIP-NEXT: br i1 icmp ne (ptr @__start_hip_offloading_entries, ptr @__stop_hip_offloading_entries), label %while.entry, label %while.end
// HIP: while.entry:
-// HIP-NEXT: %entry1 = phi ptr [ @__start_hip_offloading_entries, %entry ], [ %7, %if.end ]
+// HIP-NEXT: %entry1 = phi ptr [ @__start_hip_offloading_entries, %entry ], [ %12, %if.end ]
// HIP-NEXT: %1 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 0
// HIP-NEXT: %addr = load ptr, ptr %1, align 8
// HIP-NEXT: %2 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 1
@@ -177,15 +188,24 @@
// HIP-NEXT: %size = load i64, ptr %3, align 4
// HIP-NEXT: %4 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 3
// HIP-NEXT: %flag = load i32, ptr %4, align 4
-// HIP-NEXT: %5 = icmp eq i64 %size, 0
-// HIP-NEXT: br i1 %5, label %if.then, label %if.else
+// HIP-NEXT: %5 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 4
+// HIP-NEXT: %textype = load i32, ptr %4, align 4
+// HIP-NEXT: %6 = and i32 %flag, 3
+// HIP-NEXT: %7 = and i32 %flag, 4
+// HIP-NEXT: %extern = lshr i32 %7, 2
+// HIP-NEXT: %8 = and i32 %flag, 8
+// HIP-NEXT: %constant = lshr i32 %8, 3
+// HIP-NEXT: %9 = and i32 %flag, 22
+// HIP-NEXT: %normalized = lshr i32 %9, 4
+// HIP-NEXT: %10 = icmp eq i64 %size, 0
+// HIP-NEXT: br i1 %10, label %if.then, label %if.else
// HIP: if.then:
-// HIP-NEXT: %6 = call i32 @__hipRegisterFunction(ptr %0, ptr %addr, ptr %name, ptr %name, i32 -1, ptr null, ptr null, ptr null, ptr null, ptr null)
+// HIP-NEXT: %11 = call i...
[truncated]
``````````
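The truncated CHECK lines above describe the registration loop the linker
wrapper now emits as LLVM IR. As a rough restatement of that logic in source
form, here is a hedged C++ sketch: the `TgtOffloadEntry` layout, the stubbed
`register*` helpers, and the dispatch on the low kind bits are assumptions
modeled on the IR in the test, not the actual OffloadWrapper implementation,
and the normalized-texture flag is left out.

```cpp
#include <cstdint>
#include <cstdio>

// Stand-in for the offload entry record walked between the
// __start_*_offloading_entries and __stop_*_offloading_entries symbols.
struct TgtOffloadEntry {
  void *Addr;       // device stub or variable address
  const char *Name; // device-side name
  uint64_t Size;    // 0 for kernels, sizeof(var) otherwise
  int32_t Flags;    // kind in bits 0-1, extern/const bits above
  int32_t Data;     // surface/texture type for those entries
};

// Local stubs standing in for the CUDA/HIP runtime registration calls,
// with the argument order seen in the emitted IR.
static void registerVar(void *Addr, const char *Name, int Ext, uint64_t Size,
                        int Const) {
  std::printf("var %s @%p size=%llu extern=%d const=%d\n", Name, Addr,
              (unsigned long long)Size, Ext, Const);
}
static void registerSurface(void *Addr, const char *Name, int Type, int Ext) {
  std::printf("surface %s @%p type=%d extern=%d\n", Name, Addr, Type, Ext);
}
static void registerTexture(void *Addr, const char *Name, int Type, int Norm,
                            int Ext) {
  std::printf("texture %s @%p type=%d normalized=%d extern=%d\n", Name, Addr,
              Type, Norm, Ext);
}

void registerEntries(const TgtOffloadEntry *Begin,
                     const TgtOffloadEntry *End) {
  for (const TgtOffloadEntry *E = Begin; E != End; ++E) {
    int Kind = E->Flags & 0x3;
    int Ext = (E->Flags & 0x4) >> 2;
    int Const = (E->Flags & 0x8) >> 3;
    if (E->Size == 0)
      continue; // kernels take the __cudaRegisterFunction path instead
    if (Kind == 0)
      registerVar(E->Addr, E->Name, Ext, E->Size, Const);
    else if (Kind == 2)
      registerSurface(E->Addr, E->Name, E->Data, Ext);
    else if (Kind == 3)
      registerTexture(E->Addr, E->Name, E->Data, /*Norm=*/0, Ext);
  }
}

int main() {
  int DummyVar = 0, DummySurf = 0;
  TgtOffloadEntry Entries[] = {
      {&DummyVar, "var", sizeof(int), /*Flags=*/0x8, /*Data=*/0},
      {&DummySurf, "surf", sizeof(int), /*Flags=*/0x2, /*Data=*/1},
  };
  registerEntries(Entries, Entries + 2);
}
```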
https://github.com/llvm/llvm-project/pull/73177