[clang] [Clang] Remove 't' from __builtin_amdgcn_flat_atomic_fadd_f32/f64 (PR #173381)
Juan Manuel Martinez Caamaño via cfe-commits
cfe-commits at lists.llvm.org
Wed Dec 24 01:29:15 PST 2025
https://github.com/jmmartinez updated https://github.com/llvm/llvm-project/pull/173381
>From 929a31ad81d33b6ddb789f80d1b824910fd6c2cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?=
<jmartinezcaamao at gmail.com>
Date: Tue, 23 Dec 2025 13:27:43 +0100
Subject: [PATCH 1/3] Pre-commit test: [Clang] Remove 't' from
__builtin_amdgcn_flat_atomic_fadd_f32/f64
---
.../SemaHIP/amdgpu-flat-atomic-fadd-err.hip | 30 +++++++++++++++++++
1 file changed, 30 insertions(+)
create mode 100644 clang/test/SemaHIP/amdgpu-flat-atomic-fadd-err.hip
diff --git a/clang/test/SemaHIP/amdgpu-flat-atomic-fadd-err.hip b/clang/test/SemaHIP/amdgpu-flat-atomic-fadd-err.hip
new file mode 100644
index 0000000000000..7cdaf782a66fe
--- /dev/null
+++ b/clang/test/SemaHIP/amdgpu-flat-atomic-fadd-err.hip
@@ -0,0 +1,30 @@
+// RUN: %clang_cc1 -fsyntax-only -triple amdgcn -target-cpu gfx942 -verify %s -fcuda-is-device
+// RUN: %clang_cc1 -fsyntax-only -triple x86_64 -aux-triple amdgcn -verify %s
+// expected-no-diagnostics
+
+#define __device__ __attribute__((device))
+#define __shared__ __attribute__((shared))
+
+__device__ void test_flat_atomic_fadd_f32_valid(float *ptr, float val) {
+ float result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f32(ptr, val);
+}
+
+__device__ void test_flat_atomic_fadd_f32_errors(float *ptr, float val,
+ double *ptr_d) {
+ float result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f32(ptr, val, 0);
+ result = __builtin_amdgcn_flat_atomic_fadd_f32(ptr_d, val);
+}
+
+__device__ void test_flat_atomic_fadd_f64_valid(double *ptr, double val) {
+ double result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f64(ptr, val);
+}
+
+__device__ void test_flat_atomic_fadd_f64_errors(double *ptr, double val,
+ float *ptr_f) {
+ double result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f64(ptr, val, 0);
+ result = __builtin_amdgcn_flat_atomic_fadd_f64(ptr_f, val);
+}
>From c473c709fb2bb1abfa2bfe0c533435dab4dbed0d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?=
<jmartinezcaamao at gmail.com>
Date: Tue, 23 Dec 2025 13:47:57 +0100
Subject: [PATCH 2/3] [Clang] Remove 't' from
__builtin_amdgcn_flat_atomic_fadd_f32/f64
Allows for type checking depending on the builtin signature.
This introduces some subtle changes in code generation: before, since
the signature was meaningless, we would accept any pointer type without
casting. After this change, the pointer of the atomicrmw matches the
flat address space.
---
clang/include/clang/Basic/BuiltinsAMDGPU.def | 4 ++--
clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx90a.cl | 4 ++--
clang/test/SemaHIP/amdgpu-flat-atomic-fadd-err.hip | 9 ++++-----
3 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index 88b306462a92c..2623bd476f08f 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -272,14 +272,14 @@ TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2f16, "V2hV2h*1V2h", "t", "a
TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fmin_f64, "dd*1d", "t", "gfx90a-insts")
TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fmax_f64, "dd*1d", "t", "gfx90a-insts")
-TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_f64, "dd*0d", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_f64, "dd*0d", "", "gfx90a-insts")
TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fmin_f64, "dd*0d", "t", "gfx90a-insts")
TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fmax_f64, "dd*0d", "t", "gfx90a-insts")
TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_f64, "dd*3d", "", "gfx90a-insts")
TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_f32, "ff*3f", "", "gfx8-insts")
-TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_f32, "ff*0f", "t", "gfx940-insts")
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_f32, "ff*0f", "", "gfx940-insts")
TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_v2f16, "V2hV2h*0V2h", "t", "atomic-flat-pk-add-16-insts")
TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_v2bf16, "V2sV2s*0V2s", "t", "atomic-flat-pk-add-16-insts")
TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2bf16, "V2sV2s*1V2s", "t", "atomic-global-pk-add-bf16-inst")
diff --git a/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx90a.cl b/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx90a.cl
index ef97d12afab1d..8b10e544c71c4 100644
--- a/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx90a.cl
+++ b/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx90a.cl
@@ -47,7 +47,7 @@ void test_global_max_f64(__global double *addr, double x){
}
// CHECK-LABEL: test_flat_add_local_f64
-// CHECK: = atomicrmw fadd ptr addrspace(3) %{{.+}}, double %{{.+}} syncscope("agent") monotonic, align 8{{$}}
+// CHECK: = atomicrmw fadd ptr %{{.+}}, double %{{.+}} syncscope("agent") monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
// GFX90A-LABEL: test_flat_add_local_f64$local
// GFX90A: ds_add_rtn_f64
@@ -57,7 +57,7 @@ void test_flat_add_local_f64(__local double *addr, double x){
}
// CHECK-LABEL: test_flat_global_add_f64
-// CHECK: = atomicrmw fadd ptr addrspace(1) {{.+}}, double %{{.+}} syncscope("agent") monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
+// CHECK: = atomicrmw fadd ptr {{.+}}, double %{{.+}} syncscope("agent") monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
// GFX90A-LABEL: test_flat_global_add_f64$local
// GFX90A: global_atomic_add_f64
diff --git a/clang/test/SemaHIP/amdgpu-flat-atomic-fadd-err.hip b/clang/test/SemaHIP/amdgpu-flat-atomic-fadd-err.hip
index 7cdaf782a66fe..1438b69d82719 100644
--- a/clang/test/SemaHIP/amdgpu-flat-atomic-fadd-err.hip
+++ b/clang/test/SemaHIP/amdgpu-flat-atomic-fadd-err.hip
@@ -1,6 +1,5 @@
// RUN: %clang_cc1 -fsyntax-only -triple amdgcn -target-cpu gfx942 -verify %s -fcuda-is-device
// RUN: %clang_cc1 -fsyntax-only -triple x86_64 -aux-triple amdgcn -verify %s
-// expected-no-diagnostics
#define __device__ __attribute__((device))
#define __shared__ __attribute__((shared))
@@ -13,8 +12,8 @@ __device__ void test_flat_atomic_fadd_f32_valid(float *ptr, float val) {
__device__ void test_flat_atomic_fadd_f32_errors(float *ptr, float val,
double *ptr_d) {
float result;
- result = __builtin_amdgcn_flat_atomic_fadd_f32(ptr, val, 0);
- result = __builtin_amdgcn_flat_atomic_fadd_f32(ptr_d, val);
+ result = __builtin_amdgcn_flat_atomic_fadd_f32(ptr, val, 0); // expected-error{{too many arguments to function call, expected 2, have 3}}
+ result = __builtin_amdgcn_flat_atomic_fadd_f32(ptr_d, val); // expected-error{{cannot initialize a parameter of type}}
}
__device__ void test_flat_atomic_fadd_f64_valid(double *ptr, double val) {
@@ -25,6 +24,6 @@ __device__ void test_flat_atomic_fadd_f64_valid(double *ptr, double val) {
__device__ void test_flat_atomic_fadd_f64_errors(double *ptr, double val,
float *ptr_f) {
double result;
- result = __builtin_amdgcn_flat_atomic_fadd_f64(ptr, val, 0);
- result = __builtin_amdgcn_flat_atomic_fadd_f64(ptr_f, val);
+ result = __builtin_amdgcn_flat_atomic_fadd_f64(ptr, val, 0); // expected-error{{too many arguments to function call, expected 2, have 3}}
+ result = __builtin_amdgcn_flat_atomic_fadd_f64(ptr_f, val); // expected-error{{cannot initialize a parameter of type}}
}
>From 3b9be88694e8ae396fad79a4c4c914bb60c96acd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?=
<jmartinezcaamao at gmail.com>
Date: Wed, 24 Dec 2025 10:10:15 +0100
Subject: [PATCH 3/3] Pre-commit test: CodeGenHIP tests
---
.../CodeGenHIP/amdgpu-flat-atomic-fadd.hip | 175 ++++++++++++++++++
1 file changed, 175 insertions(+)
create mode 100644 clang/test/CodeGenHIP/amdgpu-flat-atomic-fadd.hip
diff --git a/clang/test/CodeGenHIP/amdgpu-flat-atomic-fadd.hip b/clang/test/CodeGenHIP/amdgpu-flat-atomic-fadd.hip
new file mode 100644
index 0000000000000..97b26d2a097ed
--- /dev/null
+++ b/clang/test/CodeGenHIP/amdgpu-flat-atomic-fadd.hip
@@ -0,0 +1,175 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx942 -emit-llvm -fcuda-is-device %s -o - | FileCheck %s
+
+#define __device__ __attribute__((device))
+#define __shared__ __attribute__((shared))
+#define __constant__ __attribute__((constant))
+
+__constant__ float const_float;
+__constant__ double const_double;
+__device__ float global_float;
+__device__ double global_double;
+
+// CHECK-LABEL: define dso_local void @_Z30test_flat_atomic_fadd_f32_flatPff(
+// CHECK-SAME: ptr noundef [[PTR:%.*]], float noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[RESULT:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr
+// CHECK-NEXT: [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT: [[RESULT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT]] to ptr
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store float [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4:![0-9]+]], !amdgpu.ignore.denormal.mode [[META4]]
+// CHECK-NEXT: store float [[TMP2]], ptr [[RESULT_ASCAST]], align 4
+// CHECK-NEXT: ret void
+//
+__device__ void test_flat_atomic_fadd_f32_flat(float *ptr, float val) {
+ float result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f32(ptr, val);
+}
+
+// CHECK-LABEL: define dso_local void @_Z30test_flat_atomic_fadd_f64_flatPdd(
+// CHECK-SAME: ptr noundef [[PTR:%.*]], double noundef [[VAL:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT: [[RESULT:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr
+// CHECK-NEXT: [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT: [[RESULT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT]] to ptr
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store double [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[VAL_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], double [[TMP1]] syncscope("agent") monotonic, align 8, !amdgpu.no.fine.grained.memory [[META4]]
+// CHECK-NEXT: store double [[TMP2]], ptr [[RESULT_ASCAST]], align 8
+// CHECK-NEXT: ret void
+//
+__device__ void test_flat_atomic_fadd_f64_flat(double *ptr, double val) {
+ double result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f64(ptr, val);
+}
+
+// CHECK-LABEL: define dso_local void @_Z32test_flat_atomic_fadd_f32_sharedPff(
+// CHECK-SAME: ptr noundef [[PTR:%.*]], float noundef [[VAL:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[RESULT:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr
+// CHECK-NEXT: [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT: [[RESULT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT]] to ptr
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store float [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]], !amdgpu.ignore.denormal.mode [[META4]]
+// CHECK-NEXT: store float [[TMP2]], ptr [[RESULT_ASCAST]], align 4
+// CHECK-NEXT: ret void
+//
+__device__ void test_flat_atomic_fadd_f32_shared(__shared__ float *ptr, float val) {
+ float result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f32(ptr, val);
+}
+
+// CHECK-LABEL: define dso_local void @_Z32test_flat_atomic_fadd_f64_sharedPdd(
+// CHECK-SAME: ptr noundef [[PTR:%.*]], double noundef [[VAL:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT: [[RESULT:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT: [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr
+// CHECK-NEXT: [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT: [[RESULT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT]] to ptr
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store double [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load double, ptr [[VAL_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], double [[TMP1]] syncscope("agent") monotonic, align 8, !amdgpu.no.fine.grained.memory [[META4]]
+// CHECK-NEXT: store double [[TMP2]], ptr [[RESULT_ASCAST]], align 8
+// CHECK-NEXT: ret void
+//
+__device__ void test_flat_atomic_fadd_f64_shared(__shared__ double *ptr, double val) {
+ double result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f64(ptr, val);
+}
+
+// CHECK-LABEL: define dso_local void @_Z34test_flat_atomic_fadd_f32_constantf(
+// CHECK-SAME: float noundef [[VAL:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[RESULT:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT: [[RESULT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT]] to ptr
+// CHECK-NEXT: store float [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(4) @const_float to ptr), float [[TMP0]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]], !amdgpu.ignore.denormal.mode [[META4]]
+// CHECK-NEXT: store float [[TMP1]], ptr [[RESULT_ASCAST]], align 4
+// CHECK-NEXT: ret void
+//
+__device__ void test_flat_atomic_fadd_f32_constant(float val) {
+ float result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f32(&const_float, val);
+}
+
+// CHECK-LABEL: define dso_local void @_Z34test_flat_atomic_fadd_f64_constantd(
+// CHECK-SAME: double noundef [[VAL:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT: [[RESULT:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT: [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT: [[RESULT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT]] to ptr
+// CHECK-NEXT: store double [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[VAL_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(4) @const_double to ptr), double [[TMP0]] syncscope("agent") monotonic, align 8, !amdgpu.no.fine.grained.memory [[META4]]
+// CHECK-NEXT: store double [[TMP1]], ptr [[RESULT_ASCAST]], align 8
+// CHECK-NEXT: ret void
+//
+__device__ void test_flat_atomic_fadd_f64_constant(double val) {
+ double result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f64(&const_double, val);
+}
+
+// CHECK-LABEL: define dso_local void @_Z32test_flat_atomic_fadd_f32_globalf(
+// CHECK-SAME: float noundef [[VAL:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[RESULT:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT: [[RESULT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT]] to ptr
+// CHECK-NEXT: store float [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @global_float to ptr), float [[TMP0]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]], !amdgpu.ignore.denormal.mode [[META4]]
+// CHECK-NEXT: store float [[TMP1]], ptr [[RESULT_ASCAST]], align 4
+// CHECK-NEXT: ret void
+//
+__device__ void test_flat_atomic_fadd_f32_global(float val) {
+ float result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f32(&global_float, val);
+}
+
+// CHECK-LABEL: define dso_local void @_Z32test_flat_atomic_fadd_f64_globald(
+// CHECK-SAME: double noundef [[VAL:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT: [[RESULT:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT: [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT: [[RESULT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT]] to ptr
+// CHECK-NEXT: store double [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[VAL_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @global_double to ptr), double [[TMP0]] syncscope("agent") monotonic, align 8, !amdgpu.no.fine.grained.memory [[META4]]
+// CHECK-NEXT: store double [[TMP1]], ptr [[RESULT_ASCAST]], align 8
+// CHECK-NEXT: ret void
+//
+__device__ void test_flat_atomic_fadd_f64_global(double val) {
+ double result;
+ result = __builtin_amdgcn_flat_atomic_fadd_f64(&global_double, val);
+}
+//.
+// CHECK: [[META4]] = !{}
+//.
More information about the cfe-commits
mailing list