[clang] [libc] [Clang] Rename GPU intrinsic functions from `__gpu_` to `_gpu_` (PR #118674)

Joseph Huber via cfe-commits cfe-commits at lists.llvm.org
Wed Dec 4 12:01:35 PST 2024


https://github.com/jhuber6 updated https://github.com/llvm/llvm-project/pull/118674

From 7e28f1039a0443baea8bca7c994bb85429730674 Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Wed, 4 Dec 2024 11:55:07 -0600
Subject: [PATCH] [Clang] Rename GPU intrinsic functions from `__gpu_` to
 `_gpu_`

Summary:
This is consistent with other intrinsic headers like the SSE/AVX
intrinsics. I don't think these function names need to be specifically
reserved because we are not implicitly including this header in any TUs.
The main reason for this change is that LSP providers like `clangd`
intentionally skip autocompletion for `__`-prefixed names since they are
considered internal. This makes using this header really, really
annoying.
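
For reference (not part of the patch itself), here is a minimal sketch of
what a translation unit using the renamed entry points looks like after this
change. The `example` kernel is hypothetical and simply mirrors the updated
`gpuintrin.c` test; note that the `__gpu_kernel` attribute macro keeps its
double-underscore spelling:

```c
#include <gpuintrin.h>

__gpu_kernel void example(void) {
  uint32_t tid = _gpu_thread_id_x();        // was __gpu_thread_id_x()
  uint32_t nthreads = _gpu_num_threads_x(); // was __gpu_num_threads_x()
  uint64_t mask = _gpu_lane_mask();         // was __gpu_lane_mask()

  // Only the first active lane does anything with the values here.
  if (_gpu_is_first_in_lane(mask)) {
    (void)tid;
    (void)nthreads;
  }
  _gpu_sync_threads(); // was __gpu_sync_threads()
}
```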
---
 clang/lib/Headers/amdgpuintrin.h    |  54 +++++-----
 clang/lib/Headers/gpuintrin.h       |  98 ++++++++---------
 clang/lib/Headers/nvptxintrin.h     |  68 ++++++------
 clang/test/Headers/gpuintrin.c      | 162 ++++++++++++++--------------
 clang/test/Headers/gpuintrin_lang.c |   4 +-
 libc/shared/rpc_util.h              |  14 +--
 6 files changed, 200 insertions(+), 200 deletions(-)

diff --git a/clang/lib/Headers/amdgpuintrin.h b/clang/lib/Headers/amdgpuintrin.h
index 720674a85f52cf..07330061647915 100644
--- a/clang/lib/Headers/amdgpuintrin.h
+++ b/clang/lib/Headers/amdgpuintrin.h
@@ -34,90 +34,90 @@ _Pragma("omp begin declare variant match(device = {arch(amdgcn)})");
 #define __gpu_kernel __attribute__((amdgpu_kernel, visibility("protected")))
 
 // Returns the number of workgroups in the 'x' dimension of the grid.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_x(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_blocks_x(void) {
   return __builtin_amdgcn_grid_size_x() / __builtin_amdgcn_workgroup_size_x();
 }
 
 // Returns the number of workgroups in the 'y' dimension of the grid.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_y(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_blocks_y(void) {
   return __builtin_amdgcn_grid_size_y() / __builtin_amdgcn_workgroup_size_y();
 }
 
 // Returns the number of workgroups in the 'z' dimension of the grid.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_z(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_blocks_z(void) {
   return __builtin_amdgcn_grid_size_z() / __builtin_amdgcn_workgroup_size_z();
 }
 
 // Returns the 'x' dimension of the current AMD workgroup's id.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_x(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_block_id_x(void) {
   return __builtin_amdgcn_workgroup_id_x();
 }
 
 // Returns the 'y' dimension of the current AMD workgroup's id.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_y(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_block_id_y(void) {
   return __builtin_amdgcn_workgroup_id_y();
 }
 
 // Returns the 'z' dimension of the current AMD workgroup's id.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_z(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_block_id_z(void) {
   return __builtin_amdgcn_workgroup_id_z();
 }
 
 // Returns the number of workitems in the 'x' dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_x(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_threads_x(void) {
   return __builtin_amdgcn_workgroup_size_x();
 }
 
 // Returns the number of workitems in the 'y' dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_y(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_threads_y(void) {
   return __builtin_amdgcn_workgroup_size_y();
 }
 
 // Returns the number of workitems in the 'z' dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_z(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_threads_z(void) {
   return __builtin_amdgcn_workgroup_size_z();
 }
 
 // Returns the 'x' dimension id of the workitem in the current AMD workgroup.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_x(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_thread_id_x(void) {
   return __builtin_amdgcn_workitem_id_x();
 }
 
 // Returns the 'y' dimension id of the workitem in the current AMD workgroup.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_y(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_thread_id_y(void) {
   return __builtin_amdgcn_workitem_id_y();
 }
 
 // Returns the 'z' dimension id of the workitem in the current AMD workgroup.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_z(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_thread_id_z(void) {
   return __builtin_amdgcn_workitem_id_z();
 }
 
 // Returns the size of an AMD wavefront, either 32 or 64 depending on hardware
 // and compilation options.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_lanes(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_lanes(void) {
   return __builtin_amdgcn_wavefrontsize();
 }
 
 // Returns the id of the thread inside of an AMD wavefront executing together.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_lane_id(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_lane_id(void) {
   return __builtin_amdgcn_mbcnt_hi(~0u, __builtin_amdgcn_mbcnt_lo(~0u, 0u));
 }
 
 // Returns the bit-mask of active threads in the current wavefront.
-_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_lane_mask(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint64_t _gpu_lane_mask(void) {
   return __builtin_amdgcn_read_exec();
 }
 
 // Copies the value from the first active thread in the wavefront to the rest.
 _DEFAULT_FN_ATTRS static __inline__ uint32_t
-__gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x) {
+_gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x) {
   return __builtin_amdgcn_readfirstlane(__x);
 }
 
 // Copies the value from the first active thread in the wavefront to the rest.
 _DEFAULT_FN_ATTRS __inline__ uint64_t
-__gpu_read_first_lane_u64(uint64_t __lane_mask, uint64_t __x) {
+_gpu_read_first_lane_u64(uint64_t __lane_mask, uint64_t __x) {
   uint32_t __hi = (uint32_t)(__x >> 32ull);
   uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
   return ((uint64_t)__builtin_amdgcn_readfirstlane(__hi) << 32ull) |
@@ -125,33 +125,33 @@ __gpu_read_first_lane_u64(uint64_t __lane_mask, uint64_t __x) {
 }
 
 // Returns a bitmask of threads in the current lane for which \p x is true.
-_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_ballot(uint64_t __lane_mask,
-                                                          bool __x) {
+_DEFAULT_FN_ATTRS static __inline__ uint64_t _gpu_ballot(uint64_t __lane_mask,
+                                                         bool __x) {
   // The lane_mask & gives the nvptx semantics when lane_mask is a subset of
   // the active threads
   return __lane_mask & __builtin_amdgcn_ballot_w64(__x);
 }
 
 // Waits for all the threads in the block to converge and issues a fence.
-_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_threads(void) {
+_DEFAULT_FN_ATTRS static __inline__ void _gpu_sync_threads(void) {
   __builtin_amdgcn_s_barrier();
   __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
 }
 
 // Wait for all threads in the wavefront to converge, this is a noop on AMDGPU.
-_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_lane(uint64_t __lane_mask) {
+_DEFAULT_FN_ATTRS static __inline__ void _gpu_sync_lane(uint64_t __lane_mask) {
   __builtin_amdgcn_wave_barrier();
 }
 
 // Shuffles the the lanes inside the wavefront according to the given index.
 _DEFAULT_FN_ATTRS static __inline__ uint32_t
-__gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x) {
+_gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x) {
   return __builtin_amdgcn_ds_bpermute(__idx << 2, __x);
 }
 
 // Shuffles the the lanes inside the wavefront according to the given index.
 _DEFAULT_FN_ATTRS static __inline__ uint64_t
-__gpu_shuffle_idx_u64(uint64_t __lane_mask, uint32_t __idx, uint64_t __x) {
+_gpu_shuffle_idx_u64(uint64_t __lane_mask, uint32_t __idx, uint64_t __x) {
   uint32_t __hi = (uint32_t)(__x >> 32ull);
   uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
   return ((uint64_t)__builtin_amdgcn_ds_bpermute(__idx << 2, __hi) << 32ull) |
@@ -159,24 +159,24 @@ __gpu_shuffle_idx_u64(uint64_t __lane_mask, uint32_t __idx, uint64_t __x) {
 }
 
 // Returns true if the flat pointer points to CUDA 'shared' memory.
-_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_local(void *ptr) {
+_DEFAULT_FN_ATTRS static __inline__ bool _gpu_is_ptr_local(void *ptr) {
   return __builtin_amdgcn_is_shared((void __attribute__((address_space(0))) *)((
       void [[clang::opencl_generic]] *)ptr));
 }
 
 // Returns true if the flat pointer points to CUDA 'local' memory.
-_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_private(void *ptr) {
+_DEFAULT_FN_ATTRS static __inline__ bool _gpu_is_ptr_private(void *ptr) {
   return __builtin_amdgcn_is_private((void __attribute__((
       address_space(0))) *)((void [[clang::opencl_generic]] *)ptr));
 }
 
 // Terminates execution of the associated wavefront.
-_DEFAULT_FN_ATTRS [[noreturn]] static __inline__ void __gpu_exit(void) {
+_DEFAULT_FN_ATTRS [[noreturn]] static __inline__ void _gpu_exit(void) {
   __builtin_amdgcn_endpgm();
 }
 
 // Suspend the thread briefly to assist the scheduler during busy loops.
-_DEFAULT_FN_ATTRS static __inline__ void __gpu_thread_suspend(void) {
+_DEFAULT_FN_ATTRS static __inline__ void _gpu_thread_suspend(void) {
   __builtin_amdgcn_s_sleep(2);
 }
 
diff --git a/clang/lib/Headers/gpuintrin.h b/clang/lib/Headers/gpuintrin.h
index 4c463c333308fc..be4ab81f6c961e 100644
--- a/clang/lib/Headers/gpuintrin.h
+++ b/clang/lib/Headers/gpuintrin.h
@@ -48,56 +48,56 @@ _Pragma("omp begin declare variant match(device = {kind(gpu)})");
 #define __GPU_Z_DIM 2
 
 // Returns the number of blocks in the requested dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks(int __dim) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_blocks(int __dim) {
   switch (__dim) {
   case 0:
-    return __gpu_num_blocks_x();
+    return _gpu_num_blocks_x();
   case 1:
-    return __gpu_num_blocks_y();
+    return _gpu_num_blocks_y();
   case 2:
-    return __gpu_num_blocks_z();
+    return _gpu_num_blocks_z();
   default:
     __builtin_unreachable();
   }
 }
 
 // Returns the number of block id in the requested dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id(int __dim) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_block_id(int __dim) {
   switch (__dim) {
   case 0:
-    return __gpu_block_id_x();
+    return _gpu_block_id_x();
   case 1:
-    return __gpu_block_id_y();
+    return _gpu_block_id_y();
   case 2:
-    return __gpu_block_id_z();
+    return _gpu_block_id_z();
   default:
     __builtin_unreachable();
   }
 }
 
 // Returns the number of threads in the requested dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads(int __dim) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_threads(int __dim) {
   switch (__dim) {
   case 0:
-    return __gpu_num_threads_x();
+    return _gpu_num_threads_x();
   case 1:
-    return __gpu_num_threads_y();
+    return _gpu_num_threads_y();
   case 2:
-    return __gpu_num_threads_z();
+    return _gpu_num_threads_z();
   default:
     __builtin_unreachable();
   }
 }
 
 // Returns the thread id in the requested dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id(int __dim) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_thread_id(int __dim) {
   switch (__dim) {
   case 0:
-    return __gpu_thread_id_x();
+    return _gpu_thread_id_x();
   case 1:
-    return __gpu_thread_id_y();
+    return _gpu_thread_id_y();
   case 2:
-    return __gpu_thread_id_z();
+    return _gpu_thread_id_z();
   default:
     __builtin_unreachable();
   }
@@ -105,83 +105,83 @@ _DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id(int __dim) {
 
 // Get the first active thread inside the lane.
 _DEFAULT_FN_ATTRS static __inline__ uint64_t
-__gpu_first_lane_id(uint64_t __lane_mask) {
+_gpu_first_lane_id(uint64_t __lane_mask) {
   return __builtin_ffsll(__lane_mask) - 1;
 }
 
 // Conditional that is only true for a single thread in a lane.
 _DEFAULT_FN_ATTRS static __inline__ bool
-__gpu_is_first_in_lane(uint64_t __lane_mask) {
-  return __gpu_lane_id() == __gpu_first_lane_id(__lane_mask);
+_gpu_is_first_in_lane(uint64_t __lane_mask) {
+  return _gpu_lane_id() == _gpu_first_lane_id(__lane_mask);
 }
 
 // Gets the first floating point value from the active lanes.
 _DEFAULT_FN_ATTRS static __inline__ float
-__gpu_read_first_lane_f32(uint64_t __lane_mask, float __x) {
+_gpu_read_first_lane_f32(uint64_t __lane_mask, float __x) {
   return __builtin_bit_cast(
-      float, __gpu_read_first_lane_u32(__lane_mask,
-                                       __builtin_bit_cast(uint32_t, __x)));
+      float,
+      _gpu_read_first_lane_u32(__lane_mask, __builtin_bit_cast(uint32_t, __x)));
 }
 
 // Gets the first floating point value from the active lanes.
 _DEFAULT_FN_ATTRS static __inline__ double
-__gpu_read_first_lane_f64(uint64_t __lane_mask, double __x) {
+_gpu_read_first_lane_f64(uint64_t __lane_mask, double __x) {
   return __builtin_bit_cast(
-      double, __gpu_read_first_lane_u64(__lane_mask,
-                                        __builtin_bit_cast(uint64_t, __x)));
+      double,
+      _gpu_read_first_lane_u64(__lane_mask, __builtin_bit_cast(uint64_t, __x)));
 }
 
 // Shuffles the the lanes according to the given index.
 _DEFAULT_FN_ATTRS static __inline__ float
-__gpu_shuffle_idx_f32(uint64_t __lane_mask, uint32_t __idx, float __x) {
+_gpu_shuffle_idx_f32(uint64_t __lane_mask, uint32_t __idx, float __x) {
   return __builtin_bit_cast(
-      float, __gpu_shuffle_idx_u32(__lane_mask, __idx,
-                                   __builtin_bit_cast(uint32_t, __x)));
+      float, _gpu_shuffle_idx_u32(__lane_mask, __idx,
+                                  __builtin_bit_cast(uint32_t, __x)));
 }
 
 // Shuffles the the lanes according to the given index.
 _DEFAULT_FN_ATTRS static __inline__ double
-__gpu_shuffle_idx_f64(uint64_t __lane_mask, uint32_t __idx, double __x) {
+_gpu_shuffle_idx_f64(uint64_t __lane_mask, uint32_t __idx, double __x) {
   return __builtin_bit_cast(
-      double, __gpu_shuffle_idx_u64(__lane_mask, __idx,
-                                    __builtin_bit_cast(uint64_t, __x)));
+      double, _gpu_shuffle_idx_u64(__lane_mask, __idx,
+                                   __builtin_bit_cast(uint64_t, __x)));
 }
 
 // Gets the sum of all lanes inside the warp or wavefront.
 #define __DO_LANE_SUM(__type, __suffix)                                        \
-  _DEFAULT_FN_ATTRS static __inline__ __type __gpu_lane_sum_##__suffix(        \
+  _DEFAULT_FN_ATTRS static __inline__ __type _gpu_lane_sum_##__suffix(         \
       uint64_t __lane_mask, __type __x) {                                      \
-    for (uint32_t __step = __gpu_num_lanes() / 2; __step > 0; __step /= 2) {   \
-      uint32_t __index = __step + __gpu_lane_id();                             \
-      __x += __gpu_shuffle_idx_##__suffix(__lane_mask, __index, __x);          \
+    for (uint32_t __step = _gpu_num_lanes() / 2; __step > 0; __step /= 2) {    \
+      uint32_t __index = __step + _gpu_lane_id();                              \
+      __x += _gpu_shuffle_idx_##__suffix(__lane_mask, __index, __x);           \
     }                                                                          \
-    return __gpu_read_first_lane_##__suffix(__lane_mask, __x);                 \
+    return _gpu_read_first_lane_##__suffix(__lane_mask, __x);                  \
   }
-__DO_LANE_SUM(uint32_t, u32); // uint32_t __gpu_lane_sum_u32(m, x)
-__DO_LANE_SUM(uint64_t, u64); // uint64_t __gpu_lane_sum_u64(m, x)
-__DO_LANE_SUM(float, f32);    // float __gpu_lane_sum_f32(m, x)
-__DO_LANE_SUM(double, f64);   // double __gpu_lane_sum_f64(m, x)
+__DO_LANE_SUM(uint32_t, u32); // uint32_t _gpu_lane_sum_u32(m, x)
+__DO_LANE_SUM(uint64_t, u64); // uint64_t _gpu_lane_sum_u64(m, x)
+__DO_LANE_SUM(float, f32);    // float _gpu_lane_sum_f32(m, x)
+__DO_LANE_SUM(double, f64);   // double _gpu_lane_sum_f64(m, x)
 #undef __DO_LANE_SUM
 
 // Gets the accumulator scan of the threads in the warp or wavefront.
 #define __DO_LANE_SCAN(__type, __bitmask_type, __suffix)                       \
-  _DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_lane_scan_##__suffix(     \
+  _DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_lane_scan_##__suffix(      \
       uint64_t __lane_mask, uint32_t __x) {                                    \
-    for (uint32_t __step = 1; __step < __gpu_num_lanes(); __step *= 2) {       \
-      uint32_t __index = __gpu_lane_id() - __step;                             \
-      __bitmask_type bitmask = __gpu_lane_id() >= __step;                      \
+    for (uint32_t __step = 1; __step < _gpu_num_lanes(); __step *= 2) {        \
+      uint32_t __index = _gpu_lane_id() - __step;                              \
+      __bitmask_type bitmask = _gpu_lane_id() >= __step;                       \
       __x += __builtin_bit_cast(                                               \
           __type,                                                              \
           -bitmask & __builtin_bit_cast(__bitmask_type,                        \
-                                        __gpu_shuffle_idx_##__suffix(          \
+                                        _gpu_shuffle_idx_##__suffix(           \
                                             __lane_mask, __index, __x)));      \
     }                                                                          \
     return __x;                                                                \
   }
-__DO_LANE_SCAN(uint32_t, uint32_t, u32); // uint32_t __gpu_lane_scan_u32(m, x)
-__DO_LANE_SCAN(uint64_t, uint64_t, u64); // uint64_t __gpu_lane_scan_u64(m, x)
-__DO_LANE_SCAN(float, uint32_t, f32);    // float __gpu_lane_scan_f32(m, x)
-__DO_LANE_SCAN(double, uint64_t, f64);   // double __gpu_lane_scan_f64(m, x)
+__DO_LANE_SCAN(uint32_t, uint32_t, u32); // uint32_t _gpu_lane_scan_u32(m, x)
+__DO_LANE_SCAN(uint64_t, uint64_t, u64); // uint64_t _gpu_lane_scan_u64(m, x)
+__DO_LANE_SCAN(float, uint32_t, f32);    // float _gpu_lane_scan_f32(m, x)
+__DO_LANE_SCAN(double, uint64_t, f64);   // double _gpu_lane_scan_f64(m, x)
 #undef __DO_LANE_SCAN
 
 _Pragma("omp end declare variant");
diff --git a/clang/lib/Headers/nvptxintrin.h b/clang/lib/Headers/nvptxintrin.h
index 962dca9cf03126..14ff684cb893a4 100644
--- a/clang/lib/Headers/nvptxintrin.h
+++ b/clang/lib/Headers/nvptxintrin.h
@@ -34,159 +34,159 @@ _Pragma("omp begin declare variant match(device = {arch(nvptx64)})");
 #define __gpu_kernel __attribute__((nvptx_kernel, visibility("protected")))
 
 // Returns the number of CUDA blocks in the 'x' dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_x(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_blocks_x(void) {
   return __nvvm_read_ptx_sreg_nctaid_x();
 }
 
 // Returns the number of CUDA blocks in the 'y' dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_y(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_blocks_y(void) {
   return __nvvm_read_ptx_sreg_nctaid_y();
 }
 
 // Returns the number of CUDA blocks in the 'z' dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_z(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_blocks_z(void) {
   return __nvvm_read_ptx_sreg_nctaid_z();
 }
 
 // Returns the 'x' dimension of the current CUDA block's id.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_x(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_block_id_x(void) {
   return __nvvm_read_ptx_sreg_ctaid_x();
 }
 
 // Returns the 'y' dimension of the current CUDA block's id.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_y(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_block_id_y(void) {
   return __nvvm_read_ptx_sreg_ctaid_y();
 }
 
 // Returns the 'z' dimension of the current CUDA block's id.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_z(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_block_id_z(void) {
   return __nvvm_read_ptx_sreg_ctaid_z();
 }
 
 // Returns the number of CUDA threads in the 'x' dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_x(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_threads_x(void) {
   return __nvvm_read_ptx_sreg_ntid_x();
 }
 
 // Returns the number of CUDA threads in the 'y' dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_y(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_threads_y(void) {
   return __nvvm_read_ptx_sreg_ntid_y();
 }
 
 // Returns the number of CUDA threads in the 'z' dimension.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_z(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_threads_z(void) {
   return __nvvm_read_ptx_sreg_ntid_z();
 }
 
 // Returns the 'x' dimension id of the thread in the current CUDA block.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_x(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_thread_id_x(void) {
   return __nvvm_read_ptx_sreg_tid_x();
 }
 
 // Returns the 'y' dimension id of the thread in the current CUDA block.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_y(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_thread_id_y(void) {
   return __nvvm_read_ptx_sreg_tid_y();
 }
 
 // Returns the 'z' dimension id of the thread in the current CUDA block.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_z(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_thread_id_z(void) {
   return __nvvm_read_ptx_sreg_tid_z();
 }
 
 // Returns the size of a CUDA warp, always 32 on NVIDIA hardware.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_lanes(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_num_lanes(void) {
   return __nvvm_read_ptx_sreg_warpsize();
 }
 
 // Returns the id of the thread inside of a CUDA warp executing together.
-_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_lane_id(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint32_t _gpu_lane_id(void) {
   return __nvvm_read_ptx_sreg_laneid();
 }
 
 // Returns the bit-mask of active threads in the current warp.
-_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_lane_mask(void) {
+_DEFAULT_FN_ATTRS static __inline__ uint64_t _gpu_lane_mask(void) {
   return __nvvm_activemask();
 }
 
 // Copies the value from the first active thread in the warp to the rest.
 _DEFAULT_FN_ATTRS static __inline__ uint32_t
-__gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x) {
+_gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x) {
   uint32_t __mask = (uint32_t)__lane_mask;
   uint32_t __id = __builtin_ffs(__mask) - 1;
-  return __nvvm_shfl_sync_idx_i32(__mask, __x, __id, __gpu_num_lanes() - 1);
+  return __nvvm_shfl_sync_idx_i32(__mask, __x, __id, _gpu_num_lanes() - 1);
 }
 
 // Copies the value from the first active thread in the warp to the rest.
 _DEFAULT_FN_ATTRS static __inline__ uint64_t
-__gpu_read_first_lane_u64(uint64_t __lane_mask, uint64_t __x) {
+_gpu_read_first_lane_u64(uint64_t __lane_mask, uint64_t __x) {
   uint32_t __hi = (uint32_t)(__x >> 32ull);
   uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
   uint32_t __mask = (uint32_t)__lane_mask;
   uint32_t __id = __builtin_ffs(__mask) - 1;
   return ((uint64_t)__nvvm_shfl_sync_idx_i32(__mask, __hi, __id,
-                                             __gpu_num_lanes() - 1)
+                                             _gpu_num_lanes() - 1)
           << 32ull) |
          ((uint64_t)__nvvm_shfl_sync_idx_i32(__mask, __lo, __id,
-                                             __gpu_num_lanes() - 1));
+                                             _gpu_num_lanes() - 1));
 }
 
 // Returns a bitmask of threads in the current lane for which \p x is true.
-_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_ballot(uint64_t __lane_mask,
-                                                          bool __x) {
+_DEFAULT_FN_ATTRS static __inline__ uint64_t _gpu_ballot(uint64_t __lane_mask,
+                                                         bool __x) {
   uint32_t __mask = (uint32_t)__lane_mask;
   return __nvvm_vote_ballot_sync(__mask, __x);
 }
 
 // Waits for all the threads in the block to converge and issues a fence.
-_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_threads(void) {
+_DEFAULT_FN_ATTRS static __inline__ void _gpu_sync_threads(void) {
   __syncthreads();
 }
 
 // Waits for all threads in the warp to reconverge for independent scheduling.
-_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_lane(uint64_t __lane_mask) {
+_DEFAULT_FN_ATTRS static __inline__ void _gpu_sync_lane(uint64_t __lane_mask) {
   __nvvm_bar_warp_sync((uint32_t)__lane_mask);
 }
 
 // Shuffles the the lanes inside the warp according to the given index.
 _DEFAULT_FN_ATTRS static __inline__ uint32_t
-__gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x) {
+_gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x) {
   uint32_t __mask = (uint32_t)__lane_mask;
   uint32_t __bitmask = (__mask >> __idx) & 1u;
   return -__bitmask &
-         __nvvm_shfl_sync_idx_i32(__mask, __x, __idx, __gpu_num_lanes() - 1u);
+         __nvvm_shfl_sync_idx_i32(__mask, __x, __idx, _gpu_num_lanes() - 1u);
 }
 
 // Shuffles the the lanes inside the warp according to the given index.
 _DEFAULT_FN_ATTRS static __inline__ uint64_t
-__gpu_shuffle_idx_u64(uint64_t __lane_mask, uint32_t __idx, uint64_t __x) {
+_gpu_shuffle_idx_u64(uint64_t __lane_mask, uint32_t __idx, uint64_t __x) {
   uint32_t __hi = (uint32_t)(__x >> 32ull);
   uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
   uint32_t __mask = (uint32_t)__lane_mask;
   uint64_t __bitmask = (__mask >> __idx) & 1u;
-  return -__bitmask & ((uint64_t)__nvvm_shfl_sync_idx_i32(
-                           __mask, __hi, __idx, __gpu_num_lanes() - 1u)
+  return -__bitmask & ((uint64_t)__nvvm_shfl_sync_idx_i32(__mask, __hi, __idx,
+                                                          _gpu_num_lanes() - 1u)
                        << 32ull) |
          ((uint64_t)__nvvm_shfl_sync_idx_i32(__mask, __lo, __idx,
-                                             __gpu_num_lanes() - 1u));
+                                             _gpu_num_lanes() - 1u));
 }
 
 // Returns true if the flat pointer points to CUDA 'shared' memory.
-_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_local(void *ptr) {
+_DEFAULT_FN_ATTRS static __inline__ bool _gpu_is_ptr_local(void *ptr) {
   return __nvvm_isspacep_shared(ptr);
 }
 
 // Returns true if the flat pointer points to CUDA 'local' memory.
-_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_private(void *ptr) {
+_DEFAULT_FN_ATTRS static __inline__ bool _gpu_is_ptr_private(void *ptr) {
   return __nvvm_isspacep_local(ptr);
 }
 
 // Terminates execution of the calling thread.
-_DEFAULT_FN_ATTRS [[noreturn]] static __inline__ void __gpu_exit(void) {
+_DEFAULT_FN_ATTRS [[noreturn]] static __inline__ void _gpu_exit(void) {
   __nvvm_exit();
 }
 
 // Suspend the thread briefly to assist the scheduler during busy loops.
-_DEFAULT_FN_ATTRS static __inline__ void __gpu_thread_suspend(void) {
+_DEFAULT_FN_ATTRS static __inline__ void _gpu_thread_suspend(void) {
   if (__nvvm_reflect("__CUDA_ARCH") >= 700)
     asm("nanosleep.u32 64;" ::: "memory");
 }
diff --git a/clang/test/Headers/gpuintrin.c b/clang/test/Headers/gpuintrin.c
index 2e45f73692f534..6947105fdebedd 100644
--- a/clang/test/Headers/gpuintrin.c
+++ b/clang/test/Headers/gpuintrin.c
@@ -15,93 +15,93 @@
 // AMDGPU-LABEL: define protected amdgpu_kernel void @foo(
 // AMDGPU-SAME: ) #[[ATTR0:[0-9]+]] {
 // AMDGPU-NEXT:  [[ENTRY:.*:]]
-// AMDGPU-NEXT:    [[CALL:%.*]] = call i32 @__gpu_num_blocks_x() #[[ATTR7:[0-9]+]]
-// AMDGPU-NEXT:    [[CALL1:%.*]] = call i32 @__gpu_num_blocks_y() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL2:%.*]] = call i32 @__gpu_num_blocks_z() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL3:%.*]] = call i32 @__gpu_num_blocks(i32 noundef 0) #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL4:%.*]] = call i32 @__gpu_block_id_x() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL5:%.*]] = call i32 @__gpu_block_id_y() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL6:%.*]] = call i32 @__gpu_block_id_z() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL7:%.*]] = call i32 @__gpu_block_id(i32 noundef 0) #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL8:%.*]] = call i32 @__gpu_num_threads_x() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL9:%.*]] = call i32 @__gpu_num_threads_y() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL10:%.*]] = call i32 @__gpu_num_threads_z() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL11:%.*]] = call i32 @__gpu_num_threads(i32 noundef 0) #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL12:%.*]] = call i32 @__gpu_thread_id_x() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL13:%.*]] = call i32 @__gpu_thread_id_y() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL14:%.*]] = call i32 @__gpu_thread_id_z() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL15:%.*]] = call i32 @__gpu_thread_id(i32 noundef 0) #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL16:%.*]] = call i32 @__gpu_num_lanes() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL17:%.*]] = call i32 @__gpu_lane_id() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL18:%.*]] = call i64 @__gpu_lane_mask() #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL19:%.*]] = call i32 @__gpu_read_first_lane_u32(i64 noundef -1, i32 noundef -1) #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL20:%.*]] = call i64 @__gpu_ballot(i64 noundef -1, i1 noundef zeroext true) #[[ATTR7]]
-// AMDGPU-NEXT:    call void @__gpu_sync_threads() #[[ATTR7]]
-// AMDGPU-NEXT:    call void @__gpu_sync_lane(i64 noundef -1) #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL21:%.*]] = call i32 @__gpu_shuffle_idx_u32(i64 noundef -1, i32 noundef -1, i32 noundef -1) #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL22:%.*]] = call i64 @__gpu_first_lane_id(i64 noundef -1) #[[ATTR7]]
-// AMDGPU-NEXT:    [[CALL23:%.*]] = call zeroext i1 @__gpu_is_first_in_lane(i64 noundef -1) #[[ATTR7]]
-// AMDGPU-NEXT:    call void @__gpu_exit() #[[ATTR8:[0-9]+]]
+// AMDGPU-NEXT:    [[CALL:%.*]] = call i32 @_gpu_num_blocks_x() #[[ATTR7:[0-9]+]]
+// AMDGPU-NEXT:    [[CALL1:%.*]] = call i32 @_gpu_num_blocks_y() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL2:%.*]] = call i32 @_gpu_num_blocks_z() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL3:%.*]] = call i32 @_gpu_num_blocks(i32 noundef 0) #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL4:%.*]] = call i32 @_gpu_block_id_x() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL5:%.*]] = call i32 @_gpu_block_id_y() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL6:%.*]] = call i32 @_gpu_block_id_z() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL7:%.*]] = call i32 @_gpu_block_id(i32 noundef 0) #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL8:%.*]] = call i32 @_gpu_num_threads_x() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL9:%.*]] = call i32 @_gpu_num_threads_y() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL10:%.*]] = call i32 @_gpu_num_threads_z() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL11:%.*]] = call i32 @_gpu_num_threads(i32 noundef 0) #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL12:%.*]] = call i32 @_gpu_thread_id_x() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL13:%.*]] = call i32 @_gpu_thread_id_y() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL14:%.*]] = call i32 @_gpu_thread_id_z() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL15:%.*]] = call i32 @_gpu_thread_id(i32 noundef 0) #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL16:%.*]] = call i32 @_gpu_num_lanes() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL17:%.*]] = call i32 @_gpu_lane_id() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL18:%.*]] = call i64 @_gpu_lane_mask() #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL19:%.*]] = call i32 @_gpu_read_first_lane_u32(i64 noundef -1, i32 noundef -1) #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL20:%.*]] = call i64 @_gpu_ballot(i64 noundef -1, i1 noundef zeroext true) #[[ATTR7]]
+// AMDGPU-NEXT:    call void @_gpu_sync_threads() #[[ATTR7]]
+// AMDGPU-NEXT:    call void @_gpu_sync_lane(i64 noundef -1) #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL21:%.*]] = call i32 @_gpu_shuffle_idx_u32(i64 noundef -1, i32 noundef -1, i32 noundef -1) #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL22:%.*]] = call i64 @_gpu_first_lane_id(i64 noundef -1) #[[ATTR7]]
+// AMDGPU-NEXT:    [[CALL23:%.*]] = call zeroext i1 @_gpu_is_first_in_lane(i64 noundef -1) #[[ATTR7]]
+// AMDGPU-NEXT:    call void @_gpu_exit() #[[ATTR8:[0-9]+]]
 // AMDGPU-NEXT:    unreachable
 //
 // NVPTX-LABEL: define protected void @foo(
 // NVPTX-SAME: ) #[[ATTR0:[0-9]+]] {
 // NVPTX-NEXT:  [[ENTRY:.*:]]
-// NVPTX-NEXT:    [[CALL:%.*]] = call i32 @__gpu_num_blocks_x() #[[ATTR6:[0-9]+]]
-// NVPTX-NEXT:    [[CALL1:%.*]] = call i32 @__gpu_num_blocks_y() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL2:%.*]] = call i32 @__gpu_num_blocks_z() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL3:%.*]] = call i32 @__gpu_num_blocks(i32 noundef 0) #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL4:%.*]] = call i32 @__gpu_block_id_x() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL5:%.*]] = call i32 @__gpu_block_id_y() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL6:%.*]] = call i32 @__gpu_block_id_z() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL7:%.*]] = call i32 @__gpu_block_id(i32 noundef 0) #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL8:%.*]] = call i32 @__gpu_num_threads_x() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL9:%.*]] = call i32 @__gpu_num_threads_y() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL10:%.*]] = call i32 @__gpu_num_threads_z() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL11:%.*]] = call i32 @__gpu_num_threads(i32 noundef 0) #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL12:%.*]] = call i32 @__gpu_thread_id_x() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL13:%.*]] = call i32 @__gpu_thread_id_y() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL14:%.*]] = call i32 @__gpu_thread_id_z() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL15:%.*]] = call i32 @__gpu_thread_id(i32 noundef 0) #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL16:%.*]] = call i32 @__gpu_num_lanes() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL17:%.*]] = call i32 @__gpu_lane_id() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL18:%.*]] = call i64 @__gpu_lane_mask() #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL19:%.*]] = call i32 @__gpu_read_first_lane_u32(i64 noundef -1, i32 noundef -1) #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL20:%.*]] = call i64 @__gpu_ballot(i64 noundef -1, i1 noundef zeroext true) #[[ATTR6]]
-// NVPTX-NEXT:    call void @__gpu_sync_threads() #[[ATTR6]]
-// NVPTX-NEXT:    call void @__gpu_sync_lane(i64 noundef -1) #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL21:%.*]] = call i32 @__gpu_shuffle_idx_u32(i64 noundef -1, i32 noundef -1, i32 noundef -1) #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL22:%.*]] = call i64 @__gpu_first_lane_id(i64 noundef -1) #[[ATTR6]]
-// NVPTX-NEXT:    [[CALL23:%.*]] = call zeroext i1 @__gpu_is_first_in_lane(i64 noundef -1) #[[ATTR6]]
-// NVPTX-NEXT:    call void @__gpu_exit() #[[ATTR7:[0-9]+]]
+// NVPTX-NEXT:    [[CALL:%.*]] = call i32 @_gpu_num_blocks_x() #[[ATTR6:[0-9]+]]
+// NVPTX-NEXT:    [[CALL1:%.*]] = call i32 @_gpu_num_blocks_y() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL2:%.*]] = call i32 @_gpu_num_blocks_z() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL3:%.*]] = call i32 @_gpu_num_blocks(i32 noundef 0) #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL4:%.*]] = call i32 @_gpu_block_id_x() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL5:%.*]] = call i32 @_gpu_block_id_y() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL6:%.*]] = call i32 @_gpu_block_id_z() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL7:%.*]] = call i32 @_gpu_block_id(i32 noundef 0) #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL8:%.*]] = call i32 @_gpu_num_threads_x() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL9:%.*]] = call i32 @_gpu_num_threads_y() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL10:%.*]] = call i32 @_gpu_num_threads_z() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL11:%.*]] = call i32 @_gpu_num_threads(i32 noundef 0) #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL12:%.*]] = call i32 @_gpu_thread_id_x() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL13:%.*]] = call i32 @_gpu_thread_id_y() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL14:%.*]] = call i32 @_gpu_thread_id_z() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL15:%.*]] = call i32 @_gpu_thread_id(i32 noundef 0) #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL16:%.*]] = call i32 @_gpu_num_lanes() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL17:%.*]] = call i32 @_gpu_lane_id() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL18:%.*]] = call i64 @_gpu_lane_mask() #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL19:%.*]] = call i32 @_gpu_read_first_lane_u32(i64 noundef -1, i32 noundef -1) #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL20:%.*]] = call i64 @_gpu_ballot(i64 noundef -1, i1 noundef zeroext true) #[[ATTR6]]
+// NVPTX-NEXT:    call void @_gpu_sync_threads() #[[ATTR6]]
+// NVPTX-NEXT:    call void @_gpu_sync_lane(i64 noundef -1) #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL21:%.*]] = call i32 @_gpu_shuffle_idx_u32(i64 noundef -1, i32 noundef -1, i32 noundef -1) #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL22:%.*]] = call i64 @_gpu_first_lane_id(i64 noundef -1) #[[ATTR6]]
+// NVPTX-NEXT:    [[CALL23:%.*]] = call zeroext i1 @_gpu_is_first_in_lane(i64 noundef -1) #[[ATTR6]]
+// NVPTX-NEXT:    call void @_gpu_exit() #[[ATTR7:[0-9]+]]
 // NVPTX-NEXT:    unreachable
 //
 __gpu_kernel void foo() {
-  __gpu_num_blocks_x();
-  __gpu_num_blocks_y();
-  __gpu_num_blocks_z();
-  __gpu_num_blocks(0);
-  __gpu_block_id_x();
-  __gpu_block_id_y();
-  __gpu_block_id_z();
-  __gpu_block_id(0);
-  __gpu_num_threads_x();
-  __gpu_num_threads_y();
-  __gpu_num_threads_z();
-  __gpu_num_threads(0);
-  __gpu_thread_id_x();
-  __gpu_thread_id_y();
-  __gpu_thread_id_z();
-  __gpu_thread_id(0);
-  __gpu_num_lanes();
-  __gpu_lane_id();
-  __gpu_lane_mask();
-  __gpu_read_first_lane_u32(-1, -1);
-  __gpu_ballot(-1, 1);
-  __gpu_sync_threads();
-  __gpu_sync_lane(-1);
-  __gpu_shuffle_idx_u32(-1, -1, -1);
-  __gpu_first_lane_id(-1);
-  __gpu_is_first_in_lane(-1);
-  __gpu_exit();
+  _gpu_num_blocks_x();
+  _gpu_num_blocks_y();
+  _gpu_num_blocks_z();
+  _gpu_num_blocks(0);
+  _gpu_block_id_x();
+  _gpu_block_id_y();
+  _gpu_block_id_z();
+  _gpu_block_id(0);
+  _gpu_num_threads_x();
+  _gpu_num_threads_y();
+  _gpu_num_threads_z();
+  _gpu_num_threads(0);
+  _gpu_thread_id_x();
+  _gpu_thread_id_y();
+  _gpu_thread_id_z();
+  _gpu_thread_id(0);
+  _gpu_num_lanes();
+  _gpu_lane_id();
+  _gpu_lane_mask();
+  _gpu_read_first_lane_u32(-1, -1);
+  _gpu_ballot(-1, 1);
+  _gpu_sync_threads();
+  _gpu_sync_lane(-1);
+  _gpu_shuffle_idx_u32(-1, -1, -1);
+  _gpu_first_lane_id(-1);
+  _gpu_is_first_in_lane(-1);
+  _gpu_exit();
 }
diff --git a/clang/test/Headers/gpuintrin_lang.c b/clang/test/Headers/gpuintrin_lang.c
index fa04849f8094d6..855d1747c5cf98 100644
--- a/clang/test/Headers/gpuintrin_lang.c
+++ b/clang/test/Headers/gpuintrin_lang.c
@@ -31,7 +31,7 @@
 #include <gpuintrin.h>
 
 #ifdef __device__
-__device__ int foo() { return __gpu_thread_id_x(); }
+__device__ int foo() { return _gpu_thread_id_x(); }
 #else
 // CUDA-LABEL: define dso_local i32 @foo(
 // CUDA-SAME: ) #[[ATTR0:[0-9]+]] {
@@ -71,6 +71,6 @@ __device__ int foo() { return __gpu_thread_id_x(); }
 // C89-NEXT:    [[TMP0:%.*]] = call noundef {{.*}}i32 @llvm.amdgcn.workitem.id.x()
 // C89-NEXT:    ret i32 [[TMP0]]
 //
-int foo() { return __gpu_thread_id_x(); }
+int foo() { return _gpu_thread_id_x(); }
 #pragma omp declare target to(foo)
 #endif
diff --git a/libc/shared/rpc_util.h b/libc/shared/rpc_util.h
index bb0177c01b85ea..6ba0a71825547e 100644
--- a/libc/shared/rpc_util.h
+++ b/libc/shared/rpc_util.h
@@ -172,14 +172,14 @@ RPC_INLINE constexpr bool is_process_gpu() {
 /// Wait for all lanes in the group to complete.
 RPC_INLINE void sync_lane(uint64_t lane_mask) {
 #ifdef RPC_TARGET_IS_GPU
-  return __gpu_sync_lane(lane_mask);
+  return _gpu_sync_lane(lane_mask);
 #endif
 }
 
 /// Copies the value from the first active thread to the rest.
 RPC_INLINE uint32_t broadcast_value(uint64_t lane_mask, uint32_t x) {
 #ifdef RPC_TARGET_IS_GPU
-  return __gpu_read_first_lane_u32(lane_mask, x);
+  return _gpu_read_first_lane_u32(lane_mask, x);
 #else
   return x;
 #endif
@@ -188,7 +188,7 @@ RPC_INLINE uint32_t broadcast_value(uint64_t lane_mask, uint32_t x) {
 /// Returns the number lanes that participate in the RPC interface.
 RPC_INLINE uint32_t get_num_lanes() {
 #ifdef RPC_TARGET_IS_GPU
-  return __gpu_num_lanes();
+  return _gpu_num_lanes();
 #else
   return 1;
 #endif
@@ -197,7 +197,7 @@ RPC_INLINE uint32_t get_num_lanes() {
 /// Returns the id of the thread inside of an AMD wavefront executing together.
 RPC_INLINE uint64_t get_lane_mask() {
 #ifdef RPC_TARGET_IS_GPU
-  return __gpu_lane_mask();
+  return _gpu_lane_mask();
 #else
   return 1;
 #endif
@@ -206,7 +206,7 @@ RPC_INLINE uint64_t get_lane_mask() {
 /// Returns the id of the thread inside of an AMD wavefront executing together.
 RPC_INLINE uint32_t get_lane_id() {
 #ifdef RPC_TARGET_IS_GPU
-  return __gpu_lane_id();
+  return _gpu_lane_id();
 #else
   return 0;
 #endif
@@ -215,7 +215,7 @@ RPC_INLINE uint32_t get_lane_id() {
 /// Conditional that is only true for a single thread in a lane.
 RPC_INLINE bool is_first_lane(uint64_t lane_mask) {
 #ifdef RPC_TARGET_IS_GPU
-  return __gpu_is_first_in_lane(lane_mask);
+  return _gpu_is_first_in_lane(lane_mask);
 #else
   return true;
 #endif
@@ -224,7 +224,7 @@ RPC_INLINE bool is_first_lane(uint64_t lane_mask) {
 /// Returns a bitmask of threads in the current lane for which \p x is true.
 RPC_INLINE uint64_t ballot(uint64_t lane_mask, bool x) {
 #ifdef RPC_TARGET_IS_GPU
-  return __gpu_ballot(lane_mask, x);
+  return _gpu_ballot(lane_mask, x);
 #else
   return x;
 #endif


