[Mlir-commits] [mlir] 9185896 - [mlir][llvm] Add atomic support to the StoreOp.

Tobias Gysi llvmlistbot@llvm.org
Mon Feb 20 00:07:57 PST 2023


Author: Tobias Gysi
Date: 2023-02-20T09:04:27+01:00
New Revision: 9185896a36457f8e6858c9a4c789975fc067228a

URL: https://github.com/llvm/llvm-project/commit/9185896a36457f8e6858c9a4c789975fc067228a
DIFF: https://github.com/llvm/llvm-project/commit/9185896a36457f8e6858c9a4c789975fc067228a.diff

LOG: [mlir][llvm] Add atomic support to the StoreOp.

This revision adds atomic support to the StoreOp. It chooses
to print the atomic keywords together with the syncscope and
ordering arguments. The revision also implements verifiers to
ensure the constraints that apply to atomic store operations
are checked.

Depends on D144112

Reviewed By: Dinistro

Differential Revision: https://reviews.llvm.org/D144200

Added: 
    

Modified: 
    mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
    mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
    mlir/test/Dialect/LLVMIR/invalid.mlir
    mlir/test/Dialect/LLVMIR/roundtrip.mlir
    mlir/test/Target/LLVMIR/Import/instructions.ll
    mlir/test/Target/LLVMIR/llvmir.mlir

Removed: 
    


################################################################################
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index 8d02ae167b646..80818ff4f6284 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -430,16 +430,47 @@ def LLVM_StoreOp : LLVM_Op<"store">, MemoryOpBase {
                    OptionalAttr<SymbolRefArrayAttr>:$alias_scopes,
                    OptionalAttr<SymbolRefArrayAttr>:$noalias_scopes,
                    OptionalAttr<SymbolRefArrayAttr>:$tbaa,
-                   OptionalAttr<I64Attr>:$alignment, UnitAttr:$volatile_,
-                   UnitAttr:$nontemporal);
+                   OptionalAttr<I64Attr>:$alignment,
+                   UnitAttr:$volatile_,
+                   UnitAttr:$nontemporal,
+                   DefaultValuedAttr<AtomicOrdering,
+                                     "AtomicOrdering::not_atomic">:$ordering,
+                   OptionalAttr<StrAttr>:$syncscope);
   string llvmInstName = "Store";
+  let description = [{
+    The `store` operation is used to write to memory. A store may be marked as
+    atomic, volatile, and/or nontemporal, and takes a number of optional
+    attributes that specify aliasing information.
+
+    An atomic store only supports a limited set of pointer, integer, and
+    floating point types, and requires an explicit alignment.
+
+    Examples:
+    ```mlir
+    // A volatile store of a float variable.
+    llvm.store volatile %val, %ptr : f32, !llvm.ptr
+
+    // A nontemporal store of a float variable.
+    llvm.store %val, %ptr {nontemporal} : f32, !llvm.ptr
+
+    // An atomic store of an integer variable.
+    llvm.store %val, %ptr atomic monotonic {alignment = 8 : i64}
+        : i64, !llvm.ptr
+    ```
+
+    See the following link for more details:
+    https://llvm.org/docs/LangRef.html#store-instruction
+  }];
   let assemblyFormat = [{
-    (`volatile` $volatile_^)? $value `,` $addr attr-dict `:`
-    custom<StoreType>(type($value), type($addr))
+    (`volatile` $volatile_^)? $value `,` $addr
+    (`atomic` (`syncscope` `(` $syncscope^ `)`)? $ordering^)?
+    attr-dict `:` custom<StoreType>(type($value), type($addr))
   }];
   string llvmBuilder = [{
     auto *inst = builder.CreateStore($value, $addr, $volatile_);
-  }] # setAlignmentCode
+  }] # setOrderingCode
+     # setSyncScopeCode
+     # setAlignmentCode
      # setNonTemporalMetadataCode
      # setAccessGroupsMetadataCode
      # setAliasScopeMetadataCode
@@ -449,12 +480,16 @@ def LLVM_StoreOp : LLVM_Op<"store">, MemoryOpBase {
     unsigned alignment = storeInst->getAlign().value();
     $_op = $_builder.create<LLVM::StoreOp>($_location, $value, $addr,
         alignment, storeInst->isVolatile(),
-        storeInst->hasMetadata(llvm::LLVMContext::MD_nontemporal));
+        storeInst->hasMetadata(llvm::LLVMContext::MD_nontemporal),
+        convertAtomicOrderingFromLLVM(storeInst->getOrdering()),
+        getLLVMSyncScope(storeInst));
   }];
   let builders = [
     OpBuilder<(ins "Value":$value, "Value":$addr,
       CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile,
-      CArg<"bool", "false">:$isNonTemporal)>
+      CArg<"bool", "false">:$isNonTemporal,
+      CArg<"AtomicOrdering", "AtomicOrdering::not_atomic">:$ordering,
+      CArg<"StringRef", "StringRef()">:$syncscope)>
   ];
   let hasVerifier = 1;
 }

diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 66766aa0b6d10..f13f56f5911ad 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -826,21 +826,35 @@ static bool isTypeCompatibleWithAtomicOp(Type type, bool isPointerTypeAllowed) {
          *bitWidth == 64;
 }
 
-LogicalResult LoadOp::verify() {
-  if (getOrdering() != AtomicOrdering::not_atomic) {
-    if (!isTypeCompatibleWithAtomicOp(getResult().getType(),
+/// Verifies the attributes and the type of atomic memory access operations.
+template <typename OpTy>
+LogicalResult verifyAtomicMemOp(OpTy memOp, Type valueType,
+                                ArrayRef<AtomicOrdering> unsupportedOrderings) {
+  if (memOp.getOrdering() != AtomicOrdering::not_atomic) {
+    if (!isTypeCompatibleWithAtomicOp(valueType,
                                       /*isPointerTypeAllowed=*/true))
-      return emitOpError("unsupported type ")
-             << getResult().getType() << " for atomic access";
-    if (getOrdering() == AtomicOrdering::release ||
-        getOrdering() == AtomicOrdering::acq_rel)
-      return emitOpError("unsupported ordering '")
-             << stringifyAtomicOrdering(getOrdering()) << "'";
-    if (!getAlignment())
-      return emitOpError("expected alignment for atomic access");
-  } else if (getSyncscope()) {
-    return emitOpError("expected syncscope to be null for non-atomic access");
+      return memOp.emitOpError("unsupported type ")
+             << valueType << " for atomic access";
+    if (llvm::is_contained(unsupportedOrderings, memOp.getOrdering()))
+      return memOp.emitOpError("unsupported ordering '")
+             << stringifyAtomicOrdering(memOp.getOrdering()) << "'";
+    if (!memOp.getAlignment())
+      return memOp.emitOpError("expected alignment for atomic access");
+    return success();
   }
+  if (memOp.getSyncscope())
+    return memOp.emitOpError(
+        "expected syncscope to be null for non-atomic access");
+  return success();
+}
+
+LogicalResult LoadOp::verify() {
+  Type valueType = getResult().getType();
+  if (failed(verifyAtomicMemOp(
+          *this, valueType,
+          {AtomicOrdering::release, AtomicOrdering::acq_rel})))
+    return failure();
+
   return verifyMemOpMetadata(*this);
 }
 
@@ -914,15 +928,25 @@ static void printLoadType(OpAsmPrinter &printer, Operation *op, Type type,
 // StoreOp
 //===----------------------------------------------------------------------===//
 
-LogicalResult StoreOp::verify() { return verifyMemOpMetadata(*this); }
+LogicalResult StoreOp::verify() {
+  Type valueType = getValue().getType();
+  if (failed(verifyAtomicMemOp(
+          *this, valueType,
+          {AtomicOrdering::acquire, AtomicOrdering::acq_rel})))
+    return failure();
+
+  return verifyMemOpMetadata(*this);
+}
 
 void StoreOp::build(OpBuilder &builder, OperationState &state, Value value,
                     Value addr, unsigned alignment, bool isVolatile,
-                    bool isNonTemporal) {
+                    bool isNonTemporal, AtomicOrdering ordering,
+                    StringRef syncscope) {
   build(builder, state, value, addr, /*access_groups=*/nullptr,
         /*alias_scopes=*/nullptr, /*noalias_scopes=*/nullptr, /*tbaa=*/nullptr,
         alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile,
-        isNonTemporal);
+        isNonTemporal, ordering,
+        syncscope.empty() ? nullptr : builder.getStringAttr(syncscope));
 }
 
 /// Parses the StoreOp type either using the typed or opaque pointer format.

diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir
index cbcc633ae11b7..642639b18deec 100644
--- a/mlir/test/Dialect/LLVMIR/invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid.mlir
@@ -209,6 +209,41 @@ func.func @store_malformed_elem_type(%foo: !llvm.ptr, %bar: f32) {
 
 // -----
 
+func.func @store_syncscope(%val : f32, %ptr : !llvm.ptr) {
+  // expected-error @below {{expected syncscope to be null for non-atomic access}}
+  "llvm.store"(%val, %ptr) {syncscope = "singlethread"} : (f32, !llvm.ptr) -> ()
+}
+
+// -----
+
+func.func @store_unsupported_ordering(%val : f32, %ptr : !llvm.ptr) {
+  // expected-error @below {{unsupported ordering 'acquire'}}
+  llvm.store %val, %ptr atomic acquire {alignment = 4 : i64} : f32, !llvm.ptr
+}
+
+// -----
+
+func.func @store_unsupported_type(%val : f80, %ptr : !llvm.ptr) {
+  // expected-error @below {{unsupported type 'f80' for atomic access}}
+  llvm.store %val, %ptr atomic monotonic {alignment = 16 : i64} : f80, !llvm.ptr
+}
+
+// -----
+
+func.func @store_unsupported_type(%val : i1, %ptr : !llvm.ptr) {
+  // expected-error @below {{unsupported type 'i1' for atomic access}}
+  llvm.store %val, %ptr atomic monotonic {alignment = 16 : i64} : i1, !llvm.ptr
+}
+
+// -----
+
+func.func @store_unaligned_atomic(%val : f32, %ptr : !llvm.ptr) {
+  // expected-error @below {{expected alignment for atomic access}}
+  llvm.store %val, %ptr atomic monotonic : f32, !llvm.ptr
+}
+
+// -----
+
 func.func @invalid_call() {
  // expected-error @+1 {{'llvm.call' op must have either a `callee` attribute or at least an operand}}
   "llvm.call"() : () -> ()

diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
index ebab8c35b90a7..1e49b4e3124ec 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -348,6 +348,15 @@ func.func @atomic_load(%ptr : !llvm.ptr) {
   llvm.return
 }
 
+// CHECK-LABEL: @atomic_store
+func.func @atomic_store(%val : f32, %ptr : !llvm.ptr) {
+  // CHECK: llvm.store %{{.*}}, %{{.*}} atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
+  llvm.store %val, %ptr atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
+  // CHECK: llvm.store volatile %{{.*}}, %{{.*}} atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
+  llvm.store volatile %val, %ptr atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
+  llvm.return
+}
+
 // CHECK-LABEL: @atomicrmw
 func.func @atomicrmw(%ptr : !llvm.ptr, %val : f32) {
   // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : !llvm.ptr, f32

diff --git a/mlir/test/Target/LLVMIR/Import/instructions.ll b/mlir/test/Target/LLVMIR/Import/instructions.ll
index f66d7b9cdf699..47076a7abb018 100644
--- a/mlir/test/Target/LLVMIR/Import/instructions.ll
+++ b/mlir/test/Target/LLVMIR/Import/instructions.ll
@@ -368,13 +368,18 @@ define void @load_store(ptr %ptr) {
 
 ; // -----
 
-; CHECK-LABEL: @atomic_load
+; CHECK-LABEL: @atomic_load_store
 ; CHECK-SAME:  %[[PTR:[a-zA-Z0-9]+]]
-define void @atomic_load(ptr %ptr) {
+define void @atomic_load_store(ptr %ptr) {
   ; CHECK:  %[[V1:[0-9]+]] = llvm.load %[[PTR]] atomic acquire {alignment = 8 : i64} : !llvm.ptr -> f64
   ; CHECK:  %[[V2:[0-9]+]] = llvm.load volatile %[[PTR]] atomic syncscope("singlethreaded") acquire {alignment = 16 : i64} : !llvm.ptr -> f64
   %1 = load atomic double, ptr %ptr acquire, align 8
   %2 = load atomic volatile double, ptr %ptr syncscope("singlethreaded") acquire, align 16
+
+  ; CHECK:  llvm.store %[[V1]], %[[PTR]] atomic release {alignment = 8 : i64} : f64, !llvm.ptr
+  ; CHECK:  llvm.store volatile %[[V2]], %[[PTR]] atomic syncscope("singlethreaded") release {alignment = 16 : i64} : f64, !llvm.ptr
+  store atomic double %1, ptr %ptr release, align 8
+  store atomic volatile double %2, ptr %ptr syncscope("singlethreaded") release, align 16
   ret void
 }
 

diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index da7dc7f78ba59..a58dfde99b053 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -1780,13 +1780,20 @@ llvm.func @nontemporal_store_and_load() {
 
 // -----
 
-llvm.func @atomic_load(%ptr : !llvm.ptr) {
+llvm.func @atomic_store_and_load(%ptr : !llvm.ptr) {
   // CHECK: load atomic
-  // CHECK-SAME:  monotonic, align 4
-  %1 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  // CHECK-SAME:  acquire, align 4
+  %1 = llvm.load %ptr atomic acquire {alignment = 4 : i64} : !llvm.ptr -> f32
   // CHECK: load atomic
-  // CHECK-SAME:  syncscope("singlethread") monotonic, align 4
-  %2 = llvm.load %ptr atomic syncscope("singlethread") monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  // CHECK-SAME:  syncscope("singlethread") acquire, align 4
+  %2 = llvm.load %ptr atomic syncscope("singlethread") acquire {alignment = 4 : i64} : !llvm.ptr -> f32
+
+  // CHECK: store atomic
+  // CHECK-SAME:  release, align 4
+  llvm.store %1, %ptr atomic release {alignment = 4 : i64} : f32, !llvm.ptr
+  // CHECK: store atomic
+  // CHECK-SAME:  syncscope("singlethread") release, align 4
+  llvm.store %2, %ptr atomic syncscope("singlethread") release {alignment = 4 : i64} : f32, !llvm.ptr
   llvm.return
 }
 


        


More information about the Mlir-commits mailing list