[clang] [Clang] Implement resource directory headers for common GPU intrinsics (PR #110179)
via cfe-commits
cfe-commits at lists.llvm.org
Thu Oct 3 14:04:27 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-clang
Author: Joseph Huber (jhuber6)
<details>
<summary>Changes</summary>
Summary:
All GPU-based languages provide some way to access resources like the
thread ID, but this support is spread across many different languages
and varies between targets. The goal here is to provide a resource
directory header that exposes these in an easier-to-understand way,
primarily so they can be used from C/C++ code. The interface aims to be
common across targets to facilitate porting, while target-specific
functionality can live in the individual headers.
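
For illustration, here is a minimal sketch of how the portable interface might be used from plain C. The kernel below and its indexing scheme are hypothetical; only the `__gpu_*` functions and the `_Kernel` / `_Global` macros come from the headers in this patch:

```c
#include <gpuintrin.h>
#include <stdint.h>

// Hypothetical kernel: reduce 'in' across each warp / wavefront and let the
// first active lane write out one partial sum per warp.
_Kernel void warp_sums(_Global uint32_t *in, _Global uint32_t *out) {
  uint64_t mask = __gpu_lane_mask();
  uint32_t id = __gpu_block_id_x() * __gpu_num_threads_x() + __gpu_thread_id_x();
  uint32_t sum = __gpu_lane_reduce(mask, in[id]);
  if (__gpu_is_first_lane(mask))
    out[id / __gpu_num_lanes()] = sum;
}
```

The same source should compile for either target, e.g. with `--target=amdgcn-amd-amdhsa` or `--target=nvptx64-nvidia-cuda`, since `<gpuintrin.h>` dispatches to the matching vendor header.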
---
Patch is 58.27 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/110179.diff
6 Files Affected:
- (modified) clang/lib/Headers/CMakeLists.txt (+14)
- (added) clang/lib/Headers/amdgpuintrin.h (+153)
- (added) clang/lib/Headers/gpuintrin.h (+76)
- (added) clang/lib/Headers/nvptxintrin.h (+153)
- (added) clang/test/Headers/gpuintrin.c (+508)
- (added) clang/test/Headers/gpuintrin_lang.c (+61)
``````````diff
diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
index ff392e7122a448..a0e7ae67b7219a 100644
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -268,6 +268,12 @@ set(x86_files
cpuid.h
)
+set(gpu_files
+ gpuintrin.h
+ nvptxintrin.h
+ amdgpuintrin.h
+ )
+
set(windows_only_files
intrin0.h
intrin.h
@@ -296,6 +302,7 @@ set(files
${systemz_files}
${ve_files}
${x86_files}
+ ${gpu_files}
${webassembly_files}
${windows_only_files}
${utility_files}
@@ -518,6 +525,7 @@ add_header_target("systemz-resource-headers" "${systemz_files};${zos_wrapper_fil
add_header_target("ve-resource-headers" "${ve_files}")
add_header_target("webassembly-resource-headers" "${webassembly_files}")
add_header_target("x86-resource-headers" "${x86_files}")
+add_header_target("gpu-resource-headers" "${gpu_files}")
# Other header groupings
add_header_target("hlsl-resource-headers" ${hlsl_files})
@@ -704,6 +712,12 @@ install(
EXCLUDE_FROM_ALL
COMPONENT x86-resource-headers)
+install(
+ FILES ${gpu_files}
+ DESTINATION ${header_install_dir}
+ EXCLUDE_FROM_ALL
+ COMPONENT gpu-resource-headers)
+
if(NOT CLANG_ENABLE_HLSL)
set(EXCLUDE_HLSL EXCLUDE_FROM_ALL)
endif()
diff --git a/clang/lib/Headers/amdgpuintrin.h b/clang/lib/Headers/amdgpuintrin.h
new file mode 100644
index 00000000000000..1fd7261cf4ca75
--- /dev/null
+++ b/clang/lib/Headers/amdgpuintrin.h
@@ -0,0 +1,153 @@
+//===-- amdgpuintrin.h - AMDGPU intrinsic functions -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __AMDGPUINTRIN_H
+#define __AMDGPUINTRIN_H
+
+#ifndef __AMDGPU__
+#error "This file is intended for AMDGPU targets or offloading to AMDGPU"
+#endif
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define _DEFAULT_ATTRS __attribute__((device))
+#elif !defined(_DEFAULT_ATTRS)
+#define _DEFAULT_ATTRS
+#endif
+
+#pragma omp begin declare target device_type(nohost)
+#pragma omp begin declare variant match(device = {arch(amdgcn)})
+
+// Type aliases to the address spaces used by the AMDGPU backend.
+#define _Private __attribute__((opencl_private))
+#define _Constant __attribute__((opencl_constant))
+#define _Local __attribute__((opencl_local))
+#define _Global __attribute__((opencl_global))
+
+// Attribute to declare a function as a kernel.
+#define _Kernel __attribute__((amdgpu_kernel, visibility("protected")))
+
+// Returns the number of workgroups in the 'x' dimension of the grid.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_blocks_x() {
+ return __builtin_amdgcn_grid_size_x() / __builtin_amdgcn_workgroup_size_x();
+}
+
+// Returns the number of workgroups in the 'y' dimension of the grid.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_blocks_y() {
+ return __builtin_amdgcn_grid_size_y() / __builtin_amdgcn_workgroup_size_y();
+}
+
+// Returns the number of workgroups in the 'z' dimension of the grid.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_blocks_z() {
+ return __builtin_amdgcn_grid_size_z() / __builtin_amdgcn_workgroup_size_z();
+}
+
+// Returns the 'x' dimension of the current AMD workgroup's id.
+_DEFAULT_ATTRS static inline uint32_t __gpu_block_id_x() {
+ return __builtin_amdgcn_workgroup_id_x();
+}
+
+// Returns the 'y' dimension of the current AMD workgroup's id.
+_DEFAULT_ATTRS static inline uint32_t __gpu_block_id_y() {
+ return __builtin_amdgcn_workgroup_id_y();
+}
+
+// Returns the 'z' dimension of the current AMD workgroup's id.
+_DEFAULT_ATTRS static inline uint32_t __gpu_block_id_z() {
+ return __builtin_amdgcn_workgroup_id_z();
+}
+
+// Returns the number of workitems in the 'x' dimension.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_threads_x() {
+ return __builtin_amdgcn_workgroup_size_x();
+}
+
+// Returns the number of workitems in the 'y' dimension.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_threads_y() {
+ return __builtin_amdgcn_workgroup_size_y();
+}
+
+// Returns the number of workitems in the 'z' dimension.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_threads_z() {
+ return __builtin_amdgcn_workgroup_size_z();
+}
+
+// Returns the 'x' dimension id of the workitem in the current AMD workgroup.
+_DEFAULT_ATTRS static inline uint32_t __gpu_thread_id_x() {
+ return __builtin_amdgcn_workitem_id_x();
+}
+
+// Returns the 'y' dimension id of the workitem in the current AMD workgroup.
+_DEFAULT_ATTRS static inline uint32_t __gpu_thread_id_y() {
+ return __builtin_amdgcn_workitem_id_y();
+}
+
+// Returns the 'z' dimension id of the workitem in the current AMD workgroup.
+_DEFAULT_ATTRS static inline uint32_t __gpu_thread_id_z() {
+ return __builtin_amdgcn_workitem_id_z();
+}
+
+// Returns the size of an AMD wavefront, either 32 or 64 depending on hardware
+// and compilation options.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_lanes() {
+ return __builtin_amdgcn_wavefrontsize();
+}
+
+// Returns the id of the thread inside of an AMD wavefront executing together.
+_DEFAULT_ATTRS [[clang::convergent]] static inline uint32_t __gpu_lane_id() {
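+  // mbcnt counts the set bits below this lane in the given mask; with a full
+  // mask this yields the lane's index within the wavefront.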
+ return __builtin_amdgcn_mbcnt_hi(~0u, __builtin_amdgcn_mbcnt_lo(~0u, 0u));
+}
+
+// Returns the bit-mask of active threads in the current wavefront.
+_DEFAULT_ATTRS [[clang::convergent]] static inline uint64_t __gpu_lane_mask() {
+ return __builtin_amdgcn_read_exec();
+}
+
+// Copies the value from the first active thread in the wavefront to the rest.
+_DEFAULT_ATTRS [[clang::convergent]] static inline uint32_t
+__gpu_broadcast(uint64_t __lane_mask, uint32_t __x) {
+ return __builtin_amdgcn_readfirstlane(__x);
+}
+
+// Returns a bitmask of threads in the current wavefront for which \p __x is
+// true.
+_DEFAULT_ATTRS [[clang::convergent]] static inline uint64_t
+__gpu_ballot(uint64_t __lane_mask, bool __x) {
+  // ANDing with __lane_mask matches the NVPTX semantics when __lane_mask is
+  // a subset of the currently active threads.
+ return __lane_mask & __builtin_amdgcn_ballot_w64(__x);
+}
+
+// Waits for all the threads in the block to converge and issues a fence.
+_DEFAULT_ATTRS [[clang::convergent]] static inline void __gpu_sync_threads() {
+ __builtin_amdgcn_s_barrier();
+ __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "workgroup");
+}
+
+// Waits for all threads in the wavefront to converge; this is a no-op on
+// AMDGPU.
+_DEFAULT_ATTRS [[clang::convergent]] static inline void
+__gpu_sync_lane(uint64_t __lane_mask) {
+ __builtin_amdgcn_wave_barrier();
+}
+
+// Shuffles the lanes inside the wavefront according to the given index.
+_DEFAULT_ATTRS [[clang::convergent]] static inline uint32_t
+__gpu_shuffle_idx(uint64_t __lane_mask, uint32_t __idx, uint32_t __x) {
+ return __builtin_amdgcn_ds_bpermute(__idx << 2, __x);
+}
+
+// Terminates execution of the associated wavefront.
+_DEFAULT_ATTRS [[noreturn]] static inline void __gpu_exit() {
+ __builtin_amdgcn_endpgm();
+}
+
+#pragma omp end declare variant
+#pragma omp end declare target
+
+#endif // __AMDGPUINTRIN_H
diff --git a/clang/lib/Headers/gpuintrin.h b/clang/lib/Headers/gpuintrin.h
new file mode 100644
index 00000000000000..2531ad8bba70c8
--- /dev/null
+++ b/clang/lib/Headers/gpuintrin.h
@@ -0,0 +1,76 @@
+//===-- gpuintrin.h - Generic GPU intrinsic functions ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __GPUINTRIN_H
+#define __GPUINTRIN_H
+
+#if defined(__NVPTX__)
+#include <nvptxintrin.h>
+#elif defined(__AMDGPU__)
+#include <amdgpuintrin.h>
+#endif
+
+// Returns the total number of blocks / workgroups.
+_DEFAULT_ATTRS static inline uint64_t __gpu_num_blocks() {
+ return __gpu_num_blocks_x() * __gpu_num_blocks_y() * __gpu_num_blocks_z();
+}
+
+// Returns the absolute id of the block / workgroup.
+_DEFAULT_ATTRS static inline uint64_t __gpu_block_id() {
+ return __gpu_block_id_x() +
+ (uint64_t)__gpu_num_blocks_x() * __gpu_block_id_y() +
+ (uint64_t)__gpu_num_blocks_x() * __gpu_num_blocks_y() *
+ __gpu_block_id_z();
+}
+
+// Returns the total number of threads in the block / workgroup.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_threads() {
+ return __gpu_num_threads_x() * __gpu_num_threads_y() * __gpu_num_threads_z();
+}
+
+// Returns the absolute id of the thread in the current block / workgroup.
+_DEFAULT_ATTRS static inline uint32_t __gpu_thread_id() {
+ return __gpu_thread_id_x() + __gpu_num_threads_x() * __gpu_thread_id_y() +
+ __gpu_num_threads_x() * __gpu_num_threads_y() * __gpu_thread_id_z();
+}
+
+// Returns the id of the first active lane in the warp or wavefront.
+_DEFAULT_ATTRS static inline uint64_t
+__gpu_first_lane_id(uint64_t __lane_mask) {
+ return __builtin_ffsll(__lane_mask) - 1;
+}
+
+// Returns true only for the first active lane in the warp or wavefront.
+_DEFAULT_ATTRS static inline bool __gpu_is_first_lane(uint64_t __lane_mask) {
+ return __gpu_lane_id() == __gpu_first_lane_id(__lane_mask);
+}
+
+// Gets the sum of all lanes inside the warp or wavefront.
+_DEFAULT_ATTRS static inline uint32_t __gpu_lane_reduce(uint64_t __lane_mask,
+                                                        uint32_t __x) {
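+  // Butterfly reduction: each step folds the upper half of the remaining
+  // lanes into the lower half, then the total is broadcast from the first
+  // active lane.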
+  for (uint32_t __step = __gpu_num_lanes() / 2; __step > 0; __step /= 2) {
+    uint32_t __index = __step + __gpu_lane_id();
+    __x += __gpu_shuffle_idx(__lane_mask, __index, __x);
+  }
+  return __gpu_broadcast(__lane_mask, __x);
+}
+
+// Gets the inclusive prefix sum of the threads in the warp or wavefront.
+_DEFAULT_ATTRS static inline uint32_t __gpu_lane_scan(uint64_t __lane_mask,
+                                                      uint32_t __x) {
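+  // Hillis-Steele inclusive scan: each lane adds the value from the lane
+  // '__step' below it; the bitmask zeroes the term when no such lane exists.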
+  for (uint32_t __step = 1; __step < __gpu_num_lanes(); __step *= 2) {
+    uint32_t __index = __gpu_lane_id() - __step;
+    uint32_t __bitmask = __gpu_lane_id() >= __step;
+    __x += -__bitmask & __gpu_shuffle_idx(__lane_mask, __index, __x);
+  }
+  return __x;
+}
+
+#undef _DEFAULT_ATTRS
+
+#endif // __GPUINTRIN_H
diff --git a/clang/lib/Headers/nvptxintrin.h b/clang/lib/Headers/nvptxintrin.h
new file mode 100644
index 00000000000000..fc9769d4c578dd
--- /dev/null
+++ b/clang/lib/Headers/nvptxintrin.h
@@ -0,0 +1,153 @@
+//===-- nvptxintrin.h - NVPTX intrinsic functions -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __NVPTXINTRIN_H
+#define __NVPTXINTRIN_H
+
+#ifndef __NVPTX__
+#error "This file is intended for NVPTX targets or offloading to NVPTX"
+#endif
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define _DEFAULT_ATTRS __attribute__((device))
+#elif !defined(_DEFAULT_ATTRS)
+#define _DEFAULT_ATTRS
+#endif
+
+#pragma omp begin declare target device_type(nohost)
+#pragma omp begin declare variant match(device = {arch(nvptx64)})
+
+// Type aliases to the address spaces used by the NVPTX backend.
+#define _Private __attribute__((opencl_private))
+#define _Constant __attribute__((opencl_constant))
+#define _Local __attribute__((opencl_local))
+#define _Global __attribute__((opencl_global))
+
+// Attribute to declare a function as a kernel.
+#define _Kernel __attribute__((nvptx_kernel, visibility("protected")))
+
+// Returns the number of CUDA blocks in the 'x' dimension.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_blocks_x() {
+ return __nvvm_read_ptx_sreg_nctaid_x();
+}
+
+// Returns the number of CUDA blocks in the 'y' dimension.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_blocks_y() {
+ return __nvvm_read_ptx_sreg_nctaid_y();
+}
+
+// Returns the number of CUDA blocks in the 'z' dimension.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_blocks_z() {
+ return __nvvm_read_ptx_sreg_nctaid_z();
+}
+
+// Returns the 'x' dimension of the current CUDA block's id.
+_DEFAULT_ATTRS static inline uint32_t __gpu_block_id_x() {
+ return __nvvm_read_ptx_sreg_ctaid_x();
+}
+
+// Returns the 'y' dimension of the current CUDA block's id.
+_DEFAULT_ATTRS static inline uint32_t __gpu_block_id_y() {
+ return __nvvm_read_ptx_sreg_ctaid_y();
+}
+
+// Returns the 'z' dimension of the current CUDA block's id.
+_DEFAULT_ATTRS static inline uint32_t __gpu_block_id_z() {
+ return __nvvm_read_ptx_sreg_ctaid_z();
+}
+
+// Returns the number of CUDA threads in the 'x' dimension.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_threads_x() {
+ return __nvvm_read_ptx_sreg_ntid_x();
+}
+
+// Returns the number of CUDA threads in the 'y' dimension.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_threads_y() {
+ return __nvvm_read_ptx_sreg_ntid_y();
+}
+
+// Returns the number of CUDA threads in the 'z' dimension.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_threads_z() {
+ return __nvvm_read_ptx_sreg_ntid_z();
+}
+
+// Returns the 'x' dimension id of the thread in the current CUDA block.
+_DEFAULT_ATTRS static inline uint32_t __gpu_thread_id_x() {
+ return __nvvm_read_ptx_sreg_tid_x();
+}
+
+// Returns the 'y' dimension id of the thread in the current CUDA block.
+_DEFAULT_ATTRS static inline uint32_t __gpu_thread_id_y() {
+ return __nvvm_read_ptx_sreg_tid_y();
+}
+
+// Returns the 'z' dimension id of the thread in the current CUDA block.
+_DEFAULT_ATTRS static inline uint32_t __gpu_thread_id_z() {
+ return __nvvm_read_ptx_sreg_tid_z();
+}
+
+// Returns the size of a CUDA warp, always 32 on NVIDIA hardware.
+_DEFAULT_ATTRS static inline uint32_t __gpu_num_lanes() {
+ return __nvvm_read_ptx_sreg_warpsize();
+}
+
+// Returns the id of the thread inside of a CUDA warp executing together.
+_DEFAULT_ATTRS [[clang::convergent]] static inline uint32_t __gpu_lane_id() {
+ return __nvvm_read_ptx_sreg_laneid();
+}
+
+// Returns the bit-mask of active threads in the current warp.
+_DEFAULT_ATTRS [[clang::convergent]] static inline uint64_t __gpu_lane_mask() {
+ return __nvvm_activemask();
+}
+
+// Copies the value from the first active thread in the warp to the rest.
+_DEFAULT_ATTRS [[clang::convergent]] static inline uint32_t
+__gpu_broadcast(uint64_t __lane_mask, uint32_t __x) {
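+  // Use the lowest set bit of the mask, i.e. the first active lane, as the
+  // shuffle source.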
+ uint32_t __mask = (uint32_t)__lane_mask;
+ uint32_t __id = __builtin_ffs(__mask) - 1;
+ return __nvvm_shfl_sync_idx_i32(__mask, __x, __id, __gpu_num_lanes() - 1);
+}
+
+// Returns a bitmask of threads in the current warp for which \p __x is true.
+_DEFAULT_ATTRS [[clang::convergent]] static inline uint64_t
+__gpu_ballot(uint64_t __lane_mask, bool __x) {
+ uint32_t __mask = (uint32_t)__lane_mask;
+ return __nvvm_vote_ballot_sync(__mask, __x);
+}
+
+// Waits for all the threads in the block to converge and issues a fence.
+_DEFAULT_ATTRS [[clang::convergent]] static inline void __gpu_sync_threads() {
+ __syncthreads();
+}
+
+// Waits for all threads in the warp to reconverge for independent scheduling.
+_DEFAULT_ATTRS [[clang::convergent]] static inline void
+__gpu_sync_lane(uint64_t __lane_mask) {
+ __nvvm_bar_warp_sync((uint32_t)__lane_mask);
+}
+
+// Shuffles the lanes inside the warp according to the given index.
+_DEFAULT_ATTRS [[clang::convergent]] static inline uint32_t
+__gpu_shuffle_idx(uint64_t __lane_mask, uint32_t __idx, uint32_t __x) {
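+  // Zero the result when the source lane __idx is not set in __lane_mask.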
+ uint32_t __mask = (uint32_t)__lane_mask;
+ uint32_t __bitmask = (__mask >> __idx) & 1u;
+ return -__bitmask &
+ __nvvm_shfl_sync_idx_i32(__mask, __x, __idx, __gpu_num_lanes() - 1u);
+}
+
+// Terminates execution of the calling thread.
+_DEFAULT_ATTRS [[noreturn]] static inline void __gpu_exit() { __nvvm_exit(); }
+
+#pragma omp end declare variant
+#pragma omp end declare target
+
+#endif // __NVPTXINTRIN_H
diff --git a/clang/test/Headers/gpuintrin.c b/clang/test/Headers/gpuintrin.c
new file mode 100644
index 00000000000000..ff5a816699ebef
--- /dev/null
+++ b/clang/test/Headers/gpuintrin.c
@@ -0,0 +1,508 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -internal-isystem %S/Inputs/include \
+// RUN: -internal-isystem %S/../../lib/Headers/ \
+// RUN: -triple amdgcn-amd-amdhsa -emit-llvm %s -o - \
+// RUN: | FileCheck %s --check-prefix=AMDGPU
+//
+// RUN: %clang_cc1 -internal-isystem %S/Inputs/include \
+// RUN: -internal-isystem %S/../../lib/Headers/ \
+// RUN: -target-feature +ptx62 \
+// RUN: -triple nvptx64-nvidia-cuda -emit-llvm %s -o - \
+// RUN: | FileCheck %s --check-prefix=NVPTX
+
+#define _DEFAULT_ATTRS __attribute__((always_inline))
+#include <gpuintrin.h>
+
+// AMDGPU-LABEL: define dso_local void @foo(
+// AMDGPU-SAME: ) #[[ATTR0:[0-9]+]] {
+// AMDGPU-NEXT: [[ENTRY:.*:]]
+// AMDGPU-NEXT: [[RETVAL_I116:%.*]] = alloca i64, align 8, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I114:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I112:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I110:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I19_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I17_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I15_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I12_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I9_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I_I103:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I104:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I101:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I99:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I97:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I7_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I4_I87:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I_I88:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I89:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I84:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I81:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I78:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I26_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I24_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I22_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I18_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I14_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I_I70:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I71:%.*]] = alloca i64, align 8, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I68:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I66:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I64:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I8_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I4_I:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I_I58:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I59:%.*]] = alloca i64, align 8, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I54:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I50:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I47:%.*]] = alloca i32, align 4, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I_I:%.*]] = alloca i64, align 8, addrspace(5)
+// AMDGPU-NEXT: [[__LANE_MASK_ADDR_I_I:%.*]] = alloca i64, align 8, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I42:%.*]] = alloca i1, align 1, addrspace(5)
+// AMDGPU-NEXT: [[__LANE_MASK_ADDR_I43:%.*]] = alloca i64, align 8, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I38:%.*]] = alloca i64, align 8, addrspace(5)
+// AMDGPU-NEXT: [[__LANE_MASK_ADDR_I39:%.*]] = alloca i64, align 8, addrspace(5)
+// AMDGPU-NEXT: [[RETVAL_I32:%.*]] = a...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/110179