[llvm] 5fb4a05 - [SPIR-V] Add atomic_init and fix atomic explicit lowering

Michal Paszkowski via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 19 13:16:03 PDT 2022


Author: Michal Paszkowski
Date: 2022-10-19T22:13:29+02:00
New Revision: 5fb4a051485a82e5cc16cf71b9a6089792d5446f

URL: https://github.com/llvm/llvm-project/commit/5fb4a051485a82e5cc16cf71b9a6089792d5446f
DIFF: https://github.com/llvm/llvm-project/commit/5fb4a051485a82e5cc16cf71b9a6089792d5446f.diff

LOG: [SPIR-V] Add atomic_init and fix atomic explicit lowering

Differential Revision: https://reviews.llvm.org/D135902

Added: 
    llvm/test/CodeGen/SPIRV/AtomicBuiltinsFloat.ll

Modified: 
    llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
    llvm/lib/Target/SPIRV/SPIRVBuiltins.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index 0b838ab1e8ad..ad2dc0abba4c 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -415,6 +415,18 @@ static Register buildConstantIntReg(uint64_t Val, MachineIRBuilder &MIRBuilder,
   return GR->buildConstantInt(Val, MIRBuilder, IntType);
 }
 
+/// Helper function for translating atomic init to OpStore.
+static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
+                                MachineIRBuilder &MIRBuilder) {
+  assert(Call->Arguments.size() == 2 &&
+         "Need 2 arguments for atomic init translation");
+
+  MIRBuilder.buildInstr(SPIRV::OpStore)
+      .addUse(Call->Arguments[0])
+      .addUse(Call->Arguments[1]);
+  return true;
+}
+
 /// Helper function for building an atomic load instruction.
 static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
@@ -577,10 +589,10 @@ static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
   if (Call->Arguments.size() >= 4) {
     assert(Call->Arguments.size() == 4 && "Extra args for explicit atomic RMW");
     auto CLScope = static_cast<SPIRV::CLMemoryScope>(
-        getIConstVal(Call->Arguments[5], MRI));
+        getIConstVal(Call->Arguments[3], MRI));
     Scope = getSPIRVScope(CLScope);
     if (CLScope == static_cast<unsigned>(Scope))
-      ScopeRegister = Call->Arguments[5];
+      ScopeRegister = Call->Arguments[3];
   }
   if (!ScopeRegister.isValid())
     ScopeRegister = buildConstantIntReg(Scope, MIRBuilder, GR);
@@ -595,7 +607,7 @@ static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
         getSPIRVMemSemantics(Order) |
         getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
     if (Order == Semantics)
-      MemSemanticsReg = Call->Arguments[3];
+      MemSemanticsReg = Call->Arguments[2];
   }
   if (!MemSemanticsReg.isValid())
     MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
@@ -961,6 +973,8 @@ static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
 
   switch (Opcode) {
+  case SPIRV::OpStore:
+    return buildAtomicInitInst(Call, MIRBuilder);
   case SPIRV::OpAtomicLoad:
     return buildAtomicLoadInst(Call, MIRBuilder, GR);
   case SPIRV::OpAtomicStore:
@@ -973,6 +987,7 @@ static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
   case SPIRV::OpAtomicOr:
   case SPIRV::OpAtomicXor:
   case SPIRV::OpAtomicAnd:
+  case SPIRV::OpAtomicExchange:
     return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
   case SPIRV::OpMemoryBarrier:
     return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
@@ -1558,15 +1573,15 @@ Optional<bool> lowerBuiltin(const StringRef DemangledCall,
       lookupBuiltin(DemangledCall, Set, ReturnRegister, ReturnType, Args);
 
   if (!Call) {
-    LLVM_DEBUG(dbgs() << "Builtin record was not found!");
-    return {};
+    LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
+    return None;
   }
 
   // TODO: check if the provided args meet the builtin requirements.
   assert(Args.size() >= Call->Builtin->MinNumArgs &&
          "Too few arguments to generate the builtin");
-  if (Call->Builtin->MaxNumArgs && Args.size() <= Call->Builtin->MaxNumArgs)
-    LLVM_DEBUG(dbgs() << "More arguments provided than required!");
+  if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
+    LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
 
   // Match the builtin with implementation based on the grouping.
   switch (Call->Builtin->Group) {
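
The index corrections above follow from the argument layout of the explicit
atomic builtins: for a four-argument call, Call->Arguments holds
{pointer, value, memory_order, memory_scope}, so the order is at index 2 and
the scope at index 3 (the old indices 3 and 5 pointed past the actual
operands). A minimal IR sketch of that layout, distilled from the new test
below (the function name is hypothetical, the mangled name is taken from
AtomicBuiltinsFloat.ll):

  ;; Hypothetical reduced example; mangling as in AtomicBuiltinsFloat.ll.
  ;; Arguments[0] = %p, [1] = value, [2] = order (i32 0), [3] = scope (i32 1).
  define spir_func float @exchange_explicit_args(float addrspace(4)* %p) {
  entry:
    %r = tail call spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(float addrspace(4)* %p, float 1.000000e+00, i32 0, i32 1)
    ret float %r
  }

  declare spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(float addrspace(4)*, float, i32, i32)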

diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
index da63ef53a53b..fc8909e8a96a 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
@@ -493,10 +493,11 @@ defm : DemangledNativeBuiltin<"all", OpenCL_std, Relational, 1, 1, OpAll>;
 defm : DemangledNativeBuiltin<"__spirv_All", OpenCL_std, Relational, 1, 1, OpAll>;
 
 // Atomic builtin records:
+defm : DemangledNativeBuiltin<"atomic_init", OpenCL_std, Atomic, 2, 2, OpStore>;
 defm : DemangledNativeBuiltin<"atomic_load", OpenCL_std, Atomic, 1, 1, OpAtomicLoad>;
 defm : DemangledNativeBuiltin<"atomic_load_explicit", OpenCL_std, Atomic, 2, 3, OpAtomicLoad>;
 defm : DemangledNativeBuiltin<"atomic_store", OpenCL_std, Atomic, 2, 2, OpAtomicStore>;
-defm : DemangledNativeBuiltin<"atomic_store_explicit", OpenCL_std, Atomic, 2, 2, OpAtomicStore>;
+defm : DemangledNativeBuiltin<"atomic_store_explicit", OpenCL_std, Atomic, 2, 4, OpAtomicStore>;
 defm : DemangledNativeBuiltin<"atomic_compare_exchange_strong", OpenCL_std, Atomic, 3, 6, OpAtomicCompareExchange>;
 defm : DemangledNativeBuiltin<"atomic_compare_exchange_strong_explicit", OpenCL_std, Atomic, 5, 6, OpAtomicCompareExchange>;
 defm : DemangledNativeBuiltin<"atomic_compare_exchange_weak", OpenCL_std, Atomic, 3, 6, OpAtomicCompareExchangeWeak>;

diff --git a/llvm/test/CodeGen/SPIRV/AtomicBuiltinsFloat.ll b/llvm/test/CodeGen/SPIRV/AtomicBuiltinsFloat.ll
new file mode 100644
index 000000000000..409fc5021d0b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/AtomicBuiltinsFloat.ll
@@ -0,0 +1,53 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+
+;; Types:
+; CHECK:         %[[#F32:]] = OpTypeFloat 32
+;; Constants:
+; CHECK:         %[[#CONST:]] = OpConstant %[[#F32]] 1065353216
+;; Atomic instructions:
+; CHECK:         OpStore %[[#]] %[[#CONST]]
+; CHECK-COUNT-3: OpAtomicStore
+; CHECK-COUNT-3: OpAtomicLoad
+; CHECK-COUNT-3: OpAtomicExchange
+
+define spir_kernel void @test_atomic_kernel(float addrspace(3)* %ff) local_unnamed_addr #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !5 !kernel_arg_base_type !6 !kernel_arg_type_qual !7 {
+entry:
+  %0 = addrspacecast float addrspace(3)* %ff to float addrspace(4)*
+  tail call spir_func void @_Z11atomic_initPU3AS4VU7_Atomicff(float addrspace(4)* %0, float 1.000000e+00) #2
+  tail call spir_func void @_Z12atomic_storePU3AS4VU7_Atomicff(float addrspace(4)* %0, float 1.000000e+00) #2
+  tail call spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order(float addrspace(4)* %0, float 1.000000e+00, i32 0) #2
+  tail call spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(float addrspace(4)* %0, float 1.000000e+00, i32 0, i32 1) #2
+  %call = tail call spir_func float @_Z11atomic_loadPU3AS4VU7_Atomicf(float addrspace(4)* %0) #2
+  %call1 = tail call spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order(float addrspace(4)* %0, i32 0) #2
+  %call2 = tail call spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order12memory_scope(float addrspace(4)* %0, i32 0, i32 1) #2
+  %call3 = tail call spir_func float @_Z15atomic_exchangePU3AS4VU7_Atomicff(float addrspace(4)* %0, float 1.000000e+00) #2
+  %call4 = tail call spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order(float addrspace(4)* %0, float 1.000000e+00, i32 0) #2
+  %call5 = tail call spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(float addrspace(4)* %0, float 1.000000e+00, i32 0, i32 1) #2
+  ret void
+}
+
+declare spir_func void @_Z11atomic_initPU3AS4VU7_Atomicff(float addrspace(4)*, float)
+
+declare spir_func void @_Z12atomic_storePU3AS4VU7_Atomicff(float addrspace(4)*, float)
+
+declare spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order(float addrspace(4)*, float, i32)
+
+declare spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(float addrspace(4)*, float, i32, i32)
+
+declare spir_func float @_Z11atomic_loadPU3AS4VU7_Atomicf(float addrspace(4)*)
+
+declare spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order(float addrspace(4)*, i32)
+
+declare spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order12memory_scope(float addrspace(4)*, i32, i32)
+
+declare spir_func float @_Z15atomic_exchangePU3AS4VU7_Atomicff(float addrspace(4)*, float)
+
+declare spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order(float addrspace(4)*, float, i32)
+
+declare spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(float addrspace(4)*, float, i32, i32)
+
+!3 = !{i32 3}
+!4 = !{!"none"}
+!5 = !{!"atomic_float*"}
+!6 = !{!"_Atomic(float)*"}
+!7 = !{!"volatile"}