[llvm-branch-commits] [clang] clang/AMDGPU: Emit atomicrmw for global/flat fadd v2bf16 builtins (PR #96875)
Matt Arsenault via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Thu Jun 27 02:11:25 PDT 2024
https://github.com/arsenm created https://github.com/llvm/llvm-project/pull/96875
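As a quick illustration of what this change means for codegen (drawn from the updated test expectations in the patch below; the IR value names and metadata number are illustrative, not literal FileCheck output): for a kernel such as

    void test_global_add_2bf16(__global short2 *addr, short2 x) {
      __builtin_amdgcn_global_atomic_fadd_v2bf16(addr, x);
    }

clang previously emitted a target intrinsic call,

    call <2 x i16> @llvm.amdgcn.global.atomic.fadd.v2bf16.p1(ptr addrspace(1) %addr, <2 x i16> %x)

whereas with this patch it emits a generic atomicrmw, bitcasting the i16 payload to the natural bfloat vector type and back:

    %bc  = bitcast <2 x i16> %x to <2 x bfloat>
    %rmw = atomicrmw fadd ptr addrspace(1) %addr, <2 x bfloat> %bc syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
    %res = bitcast <2 x bfloat> %rmw to <2 x i16>

The backend is then expected to select the packed bf16 atomics (e.g. global_atomic_pk_add_bf16), which is what the GFX12 and GFX940 run lines in the tests continue to check.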
From 94d04eb6576b811e11175ca36a340649a63bf007 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 26 Jun 2024 19:34:43 +0200
Subject: [PATCH] clang/AMDGPU: Emit atomicrmw for global/flat fadd v2bf16
builtins
---
clang/lib/CodeGen/CGBuiltin.cpp | 26 ++++++-------------
.../builtins-fp-atomics-gfx12.cl | 18 ++++++++++---
.../builtins-fp-atomics-gfx940.cl | 10 +++++--
3 files changed, 30 insertions(+), 24 deletions(-)
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 54e363d6fd0e8..4bbb4375ee997 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -18681,22 +18681,6 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()});
return Builder.CreateCall(F, {Addr, Val});
}
- case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
- case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16: {
- Intrinsic::ID IID;
- switch (BuiltinID) {
- case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
- IID = Intrinsic::amdgcn_global_atomic_fadd_v2bf16;
- break;
- case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16:
- IID = Intrinsic::amdgcn_flat_atomic_fadd_v2bf16;
- break;
- }
- llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
- llvm::Value *Val = EmitScalarExpr(E->getArg(1));
- llvm::Function *F = CGM.getIntrinsic(IID, {Addr->getType()});
- return Builder.CreateCall(F, {Addr, Val});
- }
case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_i32:
case AMDGPU::BI__builtin_amdgcn_global_load_tr_b64_v2i32:
case AMDGPU::BI__builtin_amdgcn_global_load_tr_b128_v4i16:
@@ -19068,7 +19052,9 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16:
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
- case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64: {
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16: {
llvm::AtomicRMWInst::BinOp BinOp;
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
@@ -19090,6 +19076,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16:
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16:
BinOp = llvm::AtomicRMWInst::FAdd;
break;
}
@@ -19126,7 +19114,9 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
AO = AtomicOrdering::SequentiallyConsistent;
// The v2bf16 builtin uses i16 instead of a natural bfloat type.
- if (BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2bf16) {
+ if (BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2bf16 ||
+ BuiltinID == AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16 ||
+ BuiltinID == AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16) {
llvm::Type *V2BF16Ty = FixedVectorType::get(
llvm::Type::getBFloatTy(Builder.getContext()), 2);
Val = Builder.CreateBitCast(Val, V2BF16Ty);
diff --git a/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx12.cl b/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx12.cl
index cf304d7b0818a..8d6bb948b0a7a 100644
--- a/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx12.cl
+++ b/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx12.cl
@@ -11,7 +11,7 @@ typedef short __attribute__((ext_vector_type(2))) short2;
// CHECK-LABEL: test_local_add_2bf16
// CHECK: [[BC0:%.+]] = bitcast <2 x i16> {{.+}} to <2 x bfloat>
-// CHECK: [[RMW:%.+]] = atomicrmw fadd ptr addrspace(3) %{{.+}}, <2 x bfloat> [[BC0]] syncscope("agent") seq_cst, align 4
+// CHECK-NEXT: [[RMW:%.+]] = atomicrmw fadd ptr addrspace(3) %{{.+}}, <2 x bfloat> [[BC0]] syncscope("agent") seq_cst, align 4
// CHECK-NEXT: bitcast <2 x bfloat> [[RMW]] to <2 x i16>
// GFX12-LABEL: test_local_add_2bf16
@@ -57,7 +57,10 @@ half2 test_flat_add_2f16(__generic half2 *addr, half2 x) {
}
// CHECK-LABEL: test_flat_add_2bf16
-// CHECK: call <2 x i16> @llvm.amdgcn.flat.atomic.fadd.v2bf16.p0(ptr %{{.*}}, <2 x i16> %{{.*}})
+// CHECK: [[BC:%.+]] = bitcast <2 x i16> %{{.+}} to <2 x bfloat>
+// CHECK: [[RMW:%.+]] = atomicrmw fadd ptr %{{.+}}, <2 x bfloat> [[BC]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
+// CHECK: bitcast <2 x bfloat> [[RMW]] to <2 x i16>
+
// GFX12-LABEL: test_flat_add_2bf16
// GFX12: flat_atomic_pk_add_bf16
short2 test_flat_add_2bf16(__generic short2 *addr, short2 x) {
@@ -84,7 +87,11 @@ void test_global_add_half2_noret(__global half2 *addr, half2 x) {
}
// CHECK-LABEL: test_global_add_2bf16
-// CHECK: call <2 x i16> @llvm.amdgcn.global.atomic.fadd.v2bf16.p1(ptr addrspace(1) %{{.*}}, <2 x i16> %{{.*}})
+// CHECK: [[BC:%.+]] = bitcast <2 x i16> %{{.+}} to <2 x bfloat>
+// CHECK: [[RMW:%.+]] = atomicrmw fadd ptr addrspace(1) %{{.+}}, <2 x bfloat> [[BC]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
+// CHECK: bitcast <2 x bfloat> [[RMW]] to <2 x i16>
+
+
// GFX12-LABEL: test_global_add_2bf16
// GFX12: global_atomic_pk_add_bf16 v2, v[0:1], v2, off th:TH_ATOMIC_RETURN
void test_global_add_2bf16(__global short2 *addr, short2 x) {
@@ -93,7 +100,10 @@ void test_global_add_2bf16(__global short2 *addr, short2 x) {
}
// CHECK-LABEL: test_global_add_2bf16_noret
-// CHECK: call <2 x i16> @llvm.amdgcn.global.atomic.fadd.v2bf16.p1(ptr addrspace(1) %{{.*}}, <2 x i16> %{{.*}})
+// CHECK: [[BC:%.+]] = bitcast <2 x i16> %{{.+}} to <2 x bfloat>
+// CHECK: [[RMW:%.+]] = atomicrmw fadd ptr addrspace(1) %{{.+}}, <2 x bfloat> [[BC]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
+// CHECK: bitcast <2 x bfloat> [[RMW]] to <2 x i16>
+
// GFX12-LABEL: test_global_add_2bf16_noret
// GFX12: global_atomic_pk_add_bf16 v[0:1], v2, off
void test_global_add_2bf16_noret(__global short2 *addr, short2 x) {
diff --git a/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx940.cl b/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx940.cl
index 2618e2809fbbf..9e2ba787f575a 100644
--- a/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx940.cl
+++ b/clang/test/CodeGenOpenCL/builtins-fp-atomics-gfx940.cl
@@ -28,7 +28,10 @@ half2 test_flat_add_2f16(__generic half2 *addr, half2 x) {
}
// CHECK-LABEL: test_flat_add_2bf16
-// CHECK: call <2 x i16> @llvm.amdgcn.flat.atomic.fadd.v2bf16.p0(ptr %{{.*}}, <2 x i16> %{{.*}})
+// CHECK: [[BC0:%.+]] = bitcast <2 x i16> {{.+}} to <2 x bfloat>
+// CHECK: [[RMW:%.+]] = atomicrmw fadd ptr %{{.+}}, <2 x bfloat> [[BC0]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
+// CHECK-NEXT: bitcast <2 x bfloat> [[RMW]] to <2 x i16>
+
// GFX940-LABEL: test_flat_add_2bf16
// GFX940: flat_atomic_pk_add_bf16
short2 test_flat_add_2bf16(__generic short2 *addr, short2 x) {
@@ -36,7 +39,10 @@ short2 test_flat_add_2bf16(__generic short2 *addr, short2 x) {
}
// CHECK-LABEL: test_global_add_2bf16
-// CHECK: call <2 x i16> @llvm.amdgcn.global.atomic.fadd.v2bf16.p1(ptr addrspace(1) %{{.*}}, <2 x i16> %{{.*}})
+// CHECK: [[BC0:%.+]] = bitcast <2 x i16> {{.+}} to <2 x bfloat>
+// CHECK: [[RMW:%.+]] = atomicrmw fadd ptr addrspace(1) %{{.+}}, <2 x bfloat> [[BC0]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
+// CHECK-NEXT: bitcast <2 x bfloat> [[RMW]] to <2 x i16>
+
// GFX940-LABEL: test_global_add_2bf16
// GFX940: global_atomic_pk_add_bf16
short2 test_global_add_2bf16(__global short2 *addr, short2 x) {