[clang] [CIR] Atomic fetch operation (PR #161631)
Sirui Mu via cfe-commits
cfe-commits at lists.llvm.org
Sat Oct 18 06:42:18 PDT 2025
https://github.com/Lancern updated https://github.com/llvm/llvm-project/pull/161631
From 415e4a592f5fcb90a5aeb80eb2e0366265878a20 Mon Sep 17 00:00:00 2001
From: Sirui Mu <msrlancern at gmail.com>
Date: Thu, 2 Oct 2025 14:13:11 +0800
Subject: [PATCH] [CIR] Atomic fetch operation
This patch adds CIR support for atomic fetch-and-update operations, including the
builtin functions `__atomic_fetch_<binop>`, `__atomic_<binop>_fetch`, and
`__c11_atomic_fetch_<binop>`, where `<binop>` is one of `add`, `sub`, `max`,
`min`, `and`, `or`, `xor`, and `nand`.
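
As a minimal illustration (not part of this patch), the two builtin families
differ only in which value they return:

```c
// Hypothetical usage sketch: both builtins update *p atomically.
int demo(int *p) {
  int old = __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST); // value before the add
  int now = __atomic_add_fetch(p, 1, __ATOMIC_SEQ_CST); // value after the add
  return now - old; // 2 when no other thread intervenes between the two calls
}
```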
---
clang/include/clang/CIR/Dialect/IR/CIROps.td | 73 +++
clang/lib/CIR/CodeGen/CIRGenAtomic.cpp | 143 ++++-
clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 15 +
.../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 141 +++++
clang/test/CIR/CodeGen/atomic.c | 523 ++++++++++++++++++
clang/test/CIR/IR/invalid-atomic.cir | 7 +
6 files changed, 878 insertions(+), 24 deletions(-)
create mode 100644 clang/test/CIR/IR/invalid-atomic.cir
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index e0163a4fecd5f..49caed6b8950d 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -4454,6 +4454,79 @@ def CIR_TryOp : CIR_Op<"try",[
// Atomic operations
//===----------------------------------------------------------------------===//
+def CIR_AtomicFetchKind : CIR_I32EnumAttr<
+ "AtomicFetchKind", "Binary opcode for atomic fetch-and-update operations", [
+ I32EnumAttrCase<"Add", 0, "add">,
+ I32EnumAttrCase<"Sub", 1, "sub">,
+ I32EnumAttrCase<"And", 2, "and">,
+ I32EnumAttrCase<"Xor", 3, "xor">,
+ I32EnumAttrCase<"Or", 4, "or">,
+ I32EnumAttrCase<"Nand", 5, "nand">,
+ I32EnumAttrCase<"Max", 6, "max">,
+ I32EnumAttrCase<"Min", 7, "min">
+]>;
+
+def CIR_AtomicFetchOp : CIR_Op<"atomic.fetch", [
+ AllTypesMatch<["result", "val"]>,
+ TypesMatchWith<"type of 'val' must match the pointee type of 'ptr'",
+ "ptr", "val", "mlir::cast<cir::PointerType>($_self).getPointee()">
+]> {
+ let summary = "Atomic fetch-and-update operation";
+ let description = [{
+    C/C++ atomic fetch-and-update operation. This operation implements the
+    builtin functions `__atomic_<binop>_fetch`, `__atomic_fetch_<binop>`, and
+    `__c11_atomic_fetch_<binop>`, where `<binop>` is one of the following
+    binary opcodes: `add`, `sub`, `and`, `xor`, `or`, `nand`, `max`, and `min`.
+
+    This operation takes two arguments: a pointer `ptr` and a value `val`. The
+    type of `val` must match the pointee type of `ptr`, and it must be either
+    an integer or a floating-point type. If the binary operation is not `add`,
+    `sub`, `max`, or `min`, then `val` must have an integer type.
+
+    This operation atomically loads the value from `ptr`, performs the binary
+    operation as indicated by `binop` on the loaded value and `val`, and stores
+    the result back to `ptr`. If the `fetch_first` flag is present, the result
+    of this operation is the old value loaded from `ptr` before the binary
+    operation; otherwise, it is the result of the binary operation.
+
+ Example:
+ %res = cir.atomic.fetch add seq_cst %ptr, %val
+ : (!cir.ptr<!s32i>, !s32i) -> !s32i
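+
+    With the `fetch_first` flag, the result is the value loaded from `ptr`
+    before the update:
+
+    %old = cir.atomic.fetch add seq_cst fetch_first %ptr, %val
+        : (!cir.ptr<!s32i>, !s32i) -> !s32i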
+ }];
+ let results = (outs CIR_AnyIntOrFloatType:$result);
+ let arguments = (ins
+ Arg<CIR_PtrToIntOrFloatType, "", [MemRead, MemWrite]>:$ptr,
+ CIR_AnyIntOrFloatType:$val,
+ CIR_AtomicFetchKind:$binop,
+ Arg<CIR_MemOrder, "memory order">:$mem_order,
+ UnitAttr:$is_volatile,
+ UnitAttr:$fetch_first
+ );
+
+ let assemblyFormat = [{
+ $binop $mem_order
+ (`fetch_first` $fetch_first^)?
+ $ptr `,` $val
+ (`volatile` $is_volatile^)?
+ `:` `(` qualified(type($ptr)) `,` qualified(type($val)) `)`
+ `->` type($result) attr-dict
+ }];
+
+ let hasVerifier = 1;
+
+ let extraLLVMLoweringPatternDecl = [{
+ mlir::Value buildPostOp(cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter,
+ mlir::Value rmwVal, bool isInt) const;
+
+ mlir::Value buildMinMaxPostOp(cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter,
+ mlir::Value rmwVal, bool isInt,
+ bool isSigned) const;
+ }];
+}
+
def CIR_AtomicXchgOp : CIR_Op<"atomic.xchg", [
AllTypesMatch<["result", "val"]>,
TypesMatchWith<"type of 'val' must match the pointee type of 'ptr'",
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index a9983f882e28c..958a8f41124da 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -346,6 +346,8 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
CIRGenBuilderTy &builder = cgf.getBuilder();
mlir::Location loc = cgf.getLoc(expr->getSourceRange());
auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
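+  // fetchFirst is true for the builtins that return the value loaded before
+  // the update (__atomic_fetch_<binop> and __c11_atomic_fetch_<binop>); the
+  // __atomic_<binop>_fetch cases below clear it.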
+ cir::AtomicFetchKindAttr fetchAttr;
+ bool fetchFirst = true;
switch (expr->getOp()) {
case AtomicExpr::AO__c11_atomic_init:
@@ -407,6 +409,86 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
opName = cir::AtomicXchgOp::getOperationName();
break;
+ case AtomicExpr::AO__atomic_add_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Add);
+ break;
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Sub);
+ break;
+
+ case AtomicExpr::AO__atomic_min_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_min:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Min);
+ break;
+
+ case AtomicExpr::AO__atomic_max_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_max:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Max);
+ break;
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::And);
+ break;
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Or);
+ break;
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Xor);
+ break;
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ fetchFirst = false;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ opName = cir::AtomicFetchOp::getOperationName();
+ fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
+ cir::AtomicFetchKind::Nand);
+ break;
+
case AtomicExpr::AO__opencl_atomic_init:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
@@ -433,74 +515,50 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
case AtomicExpr::AO__scoped_atomic_exchange_n:
case AtomicExpr::AO__scoped_atomic_exchange:
- case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__scoped_atomic_add_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
- case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__scoped_atomic_fetch_add:
- case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__scoped_atomic_sub_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
- case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__scoped_atomic_fetch_sub:
- case AtomicExpr::AO__atomic_min_fetch:
case AtomicExpr::AO__scoped_atomic_min_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
- case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__scoped_atomic_fetch_min:
- case AtomicExpr::AO__atomic_max_fetch:
case AtomicExpr::AO__scoped_atomic_max_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
- case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__scoped_atomic_fetch_max:
- case AtomicExpr::AO__atomic_and_fetch:
case AtomicExpr::AO__scoped_atomic_and_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
- case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__scoped_atomic_fetch_and:
- case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__scoped_atomic_or_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
- case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__scoped_atomic_fetch_or:
- case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__scoped_atomic_xor_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
- case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__scoped_atomic_fetch_xor:
- case AtomicExpr::AO__atomic_nand_fetch:
case AtomicExpr::AO__scoped_atomic_nand_fetch:
- case AtomicExpr::AO__c11_atomic_fetch_nand:
- case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__scoped_atomic_fetch_nand:
case AtomicExpr::AO__atomic_test_and_set:
@@ -518,9 +576,13 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
atomicOperands, atomicResTys);
+ if (fetchAttr)
+ rmwOp->setAttr("binop", fetchAttr);
rmwOp->setAttr("mem_order", orderAttr);
if (expr->isVolatile())
rmwOp->setAttr("is_volatile", builder.getUnitAttr());
+ if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
+ rmwOp->setAttr("fetch_first", builder.getUnitAttr());
mlir::Value result = rmwOp->getResult(0);
builder.createStore(loc, result, dest);
@@ -614,8 +676,41 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
isWeakExpr = e->getWeak();
break;
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ if (memTy->isPointerType()) {
+ cgm.errorNYI(e->getSourceRange(),
+ "atomic fetch-and-add and fetch-and-sub for pointers");
+ return RValue::get(nullptr);
+ }
+ [[fallthrough]];
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ shouldCastToIntPtrTy = !memTy->isFloatingType();
+ [[fallthrough]];
+
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__c11_atomic_store:
val1 = emitValToTemp(*this, e->getVal1());
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index b4c37048cbe5b..c1482a80a00cc 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -2934,6 +2934,21 @@ mlir::LogicalResult cir::ThrowOp::verify() {
return failure();
}
+//===----------------------------------------------------------------------===//
+// AtomicFetchOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult cir::AtomicFetchOp::verify() {
+ if (getBinop() != cir::AtomicFetchKind::Add &&
+ getBinop() != cir::AtomicFetchKind::Sub &&
+ getBinop() != cir::AtomicFetchKind::Max &&
+ getBinop() != cir::AtomicFetchKind::Min &&
+ !mlir::isa<cir::IntType>(getVal().getType()))
+ return emitError("only atomic add, sub, max, and min operation could "
+ "operate on floating-point values");
+ return success();
+}
+
//===----------------------------------------------------------------------===//
// TypeInfoAttr
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 0243bf120f396..6189a098663a5 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -730,6 +730,147 @@ mlir::LogicalResult CIRToLLVMAtomicXchgOpLowering::matchAndRewrite(
return mlir::success();
}
+static mlir::LLVM::AtomicBinOp
+getLLVMAtomicBinOp(cir::AtomicFetchKind k, bool isInt, bool isSignedInt) {
+ switch (k) {
+ case cir::AtomicFetchKind::Add:
+ return isInt ? mlir::LLVM::AtomicBinOp::add : mlir::LLVM::AtomicBinOp::fadd;
+ case cir::AtomicFetchKind::Sub:
+ return isInt ? mlir::LLVM::AtomicBinOp::sub : mlir::LLVM::AtomicBinOp::fsub;
+ case cir::AtomicFetchKind::And:
+ return mlir::LLVM::AtomicBinOp::_and;
+ case cir::AtomicFetchKind::Xor:
+ return mlir::LLVM::AtomicBinOp::_xor;
+ case cir::AtomicFetchKind::Or:
+ return mlir::LLVM::AtomicBinOp::_or;
+ case cir::AtomicFetchKind::Nand:
+ return mlir::LLVM::AtomicBinOp::nand;
+ case cir::AtomicFetchKind::Max: {
+ if (!isInt)
+ return mlir::LLVM::AtomicBinOp::fmax;
+ return isSignedInt ? mlir::LLVM::AtomicBinOp::max
+ : mlir::LLVM::AtomicBinOp::umax;
+ }
+ case cir::AtomicFetchKind::Min: {
+ if (!isInt)
+ return mlir::LLVM::AtomicBinOp::fmin;
+ return isSignedInt ? mlir::LLVM::AtomicBinOp::min
+ : mlir::LLVM::AtomicBinOp::umin;
+ }
+ }
+ llvm_unreachable("Unknown atomic fetch opcode");
+}
+
+static llvm::StringLiteral getLLVMBinop(cir::AtomicFetchKind k, bool isInt) {
+ switch (k) {
+ case cir::AtomicFetchKind::Add:
+ return isInt ? mlir::LLVM::AddOp::getOperationName()
+ : mlir::LLVM::FAddOp::getOperationName();
+ case cir::AtomicFetchKind::Sub:
+ return isInt ? mlir::LLVM::SubOp::getOperationName()
+ : mlir::LLVM::FSubOp::getOperationName();
+ case cir::AtomicFetchKind::And:
+ return mlir::LLVM::AndOp::getOperationName();
+ case cir::AtomicFetchKind::Xor:
+ return mlir::LLVM::XOrOp::getOperationName();
+ case cir::AtomicFetchKind::Or:
+ return mlir::LLVM::OrOp::getOperationName();
+ case cir::AtomicFetchKind::Nand:
+    // LLVM has no nand binop; matchAndRewrite negates the result of this
+    // and afterwards.
+ return mlir::LLVM::AndOp::getOperationName();
+ case cir::AtomicFetchKind::Max:
+ case cir::AtomicFetchKind::Min:
+ llvm_unreachable("handled in buildMinMaxPostOp");
+ }
+ llvm_unreachable("Unknown atomic fetch opcode");
+}
+
+mlir::Value CIRToLLVMAtomicFetchOpLowering::buildPostOp(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal,
+ bool isInt) const {
+ SmallVector<mlir::Value> atomicOperands = {rmwVal, adaptor.getVal()};
+ SmallVector<mlir::Type> atomicResTys = {rmwVal.getType()};
+ return rewriter
+ .create(op.getLoc(),
+ rewriter.getStringAttr(getLLVMBinop(op.getBinop(), isInt)),
+ atomicOperands, atomicResTys, {})
+ ->getResult(0);
+}
+
+mlir::Value CIRToLLVMAtomicFetchOpLowering::buildMinMaxPostOp(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter, mlir::Value rmwVal, bool isInt,
+ bool isSigned) const {
+ mlir::Location loc = op.getLoc();
+
+ if (!isInt) {
+ if (op.getBinop() == cir::AtomicFetchKind::Max)
+ return mlir::LLVM::MaxNumOp::create(rewriter, loc, rmwVal,
+ adaptor.getVal());
+ return mlir::LLVM::MinNumOp::create(rewriter, loc, rmwVal,
+ adaptor.getVal());
+ }
+
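+  // For integers, recompute the min/max of the old value and the operand with
+  // a compare-and-select.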
+ mlir::LLVM::ICmpPredicate pred;
+ if (op.getBinop() == cir::AtomicFetchKind::Max) {
+ pred = isSigned ? mlir::LLVM::ICmpPredicate::sgt
+ : mlir::LLVM::ICmpPredicate::ugt;
+ } else { // Min
+ pred = isSigned ? mlir::LLVM::ICmpPredicate::slt
+ : mlir::LLVM::ICmpPredicate::ult;
+ }
+ mlir::Value cmp = mlir::LLVM::ICmpOp::create(
+ rewriter, loc,
+ mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(), pred), rmwVal,
+ adaptor.getVal());
+ return mlir::LLVM::SelectOp::create(rewriter, loc, cmp, rmwVal,
+ adaptor.getVal());
+}
+
+mlir::LogicalResult CIRToLLVMAtomicFetchOpLowering::matchAndRewrite(
+ cir::AtomicFetchOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ bool isInt = false;
+ bool isSignedInt = false;
+ if (auto intTy = mlir::dyn_cast<cir::IntType>(op.getVal().getType())) {
+ isInt = true;
+ isSignedInt = intTy.isSigned();
+ } else if (mlir::isa<cir::SingleType, cir::DoubleType>(
+ op.getVal().getType())) {
+ isInt = false;
+ } else {
+    return op.emitError() << "unsupported type: " << op.getVal().getType();
+ }
+
+ mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(op.getMemOrder());
+ mlir::LLVM::AtomicBinOp llvmBinOp =
+ getLLVMAtomicBinOp(op.getBinop(), isInt, isSignedInt);
+ auto rmwVal = mlir::LLVM::AtomicRMWOp::create(rewriter, op.getLoc(),
+ llvmBinOp, adaptor.getPtr(),
+ adaptor.getVal(), llvmOrder);
+
+ mlir::Value result = rmwVal.getResult();
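+  // atomicrmw always yields the value loaded before the update. When the CIR
+  // op is not fetch_first, recompute the updated value from the old value.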
+ if (!op.getFetchFirst()) {
+ if (op.getBinop() == cir::AtomicFetchKind::Max ||
+ op.getBinop() == cir::AtomicFetchKind::Min)
+ result = buildMinMaxPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt,
+ isSignedInt);
+ else
+ result = buildPostOp(op, adaptor, rewriter, rmwVal.getRes(), isInt);
+
+    // Compensate for the lack of a nand binop in LLVM IR: nand is not-and,
+    // so flip all bits of the and result.
+ if (op.getBinop() == cir::AtomicFetchKind::Nand) {
+ auto negOne = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
+ result.getType(), -1);
+ result = mlir::LLVM::XOrOp::create(rewriter, op.getLoc(), result, negOne);
+ }
+ }
+
+ rewriter.replaceOp(op, result);
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMBitClrsbOpLowering::matchAndRewrite(
cir::BitClrsbOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index 440010a0b6938..9dcadd04c7ff0 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -514,3 +514,526 @@ void atomic_exchange_n(int *ptr, int value) {
// OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} acq_rel, align 4
// OGCG: %{{.+}} = atomicrmw xchg ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
}
+
+int atomic_fetch_add(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_add
+ // LLVM-LABEL: @atomic_fetch_add
+ // OGCG-LABEL: @atomic_fetch_add
+
+ return __atomic_fetch_add(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_add_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_add_fetch
+ // LLVM-LABEL: @atomic_add_fetch
+ // OGCG-LABEL: @atomic_add_fetch
+
+ return __atomic_add_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw add ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = add i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw add ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = add i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_add(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_add
+ // LLVM-LABEL: @c11_atomic_fetch_add
+ // OGCG-LABEL: @c11_atomic_fetch_add
+
+ return __c11_atomic_fetch_add(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw add ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_sub(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_sub
+ // LLVM-LABEL: @atomic_fetch_sub
+ // OGCG-LABEL: @atomic_fetch_sub
+
+ return __atomic_fetch_sub(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_sub_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_sub_fetch
+ // LLVM-LABEL: @atomic_sub_fetch
+ // OGCG-LABEL: @atomic_sub_fetch
+
+ return __atomic_sub_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw sub ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = sub i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw sub ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = sub i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_sub(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_sub
+ // LLVM-LABEL: @c11_atomic_fetch_sub
+ // OGCG-LABEL: @c11_atomic_fetch_sub
+
+ return __c11_atomic_fetch_sub(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw sub ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_fetch_add_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_fetch_add_fp
+ // LLVM-LABEL: @atomic_fetch_add_fp
+ // OGCG-LABEL: @atomic_fetch_add_fp
+
+ return __atomic_fetch_add(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fadd ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fadd ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_add_fetch_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_add_fetch_fp
+ // LLVM-LABEL: @atomic_add_fetch_fp
+ // OGCG-LABEL: @atomic_add_fetch_fp
+
+ return __atomic_add_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch add seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[OLD:.+]] = atomicrmw fadd ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = fadd float %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw fadd ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = fadd float %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float c11_atomic_fetch_sub_fp(_Atomic(float) *ptr, float value) {
+ // CIR-LABEL: @c11_atomic_fetch_sub_fp
+ // LLVM-LABEL: @c11_atomic_fetch_sub_fp
+ // OGCG-LABEL: @c11_atomic_fetch_sub_fp
+
+ return __c11_atomic_fetch_sub(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch sub seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fsub ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fsub ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_min(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_min
+ // LLVM-LABEL: @atomic_fetch_min
+ // OGCG-LABEL: @atomic_fetch_min
+
+ return __atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_min_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_min_fetch
+ // LLVM-LABEL: @atomic_min_fetch
+ // OGCG-LABEL: @atomic_min_fetch
+
+ return __atomic_min_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw min ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[OLD_LESS:.+]] = icmp slt i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: %[[RES:.+]] = select i1 %[[OLD_LESS]], i32 %[[OLD]], i32 %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw min ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[OLD_LESS:.+]] = icmp slt i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: %[[RES:.+]] = select i1 %[[OLD_LESS]], i32 %[[OLD]], i32 %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_min(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_min
+ // LLVM-LABEL: @c11_atomic_fetch_min
+ // OGCG-LABEL: @c11_atomic_fetch_min
+
+ return __c11_atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw min ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_fetch_min_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_fetch_min_fp
+ // LLVM-LABEL: @atomic_fetch_min_fp
+ // OGCG-LABEL: @atomic_fetch_min_fp
+
+ return __atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_min_fetch_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_min_fetch_fp
+ // LLVM-LABEL: @atomic_min_fetch_fp
+ // OGCG-LABEL: @atomic_min_fetch_fp
+
+ return __atomic_min_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[OLD:.+]] = atomicrmw fmin ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = call float @llvm.minnum.f32(float %[[OLD]], float %[[VAL]])
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw fmin ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = call float @llvm.minnum.f32(float %[[OLD]], float %[[VAL]])
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float c11_atomic_fetch_min_fp(_Atomic(float) *ptr, float value) {
+ // CIR-LABEL: @c11_atomic_fetch_min_fp
+ // LLVM-LABEL: @c11_atomic_fetch_min_fp
+ // OGCG-LABEL: @c11_atomic_fetch_min_fp
+
+ return __c11_atomic_fetch_min(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch min seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmin ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_max(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_max
+ // LLVM-LABEL: @atomic_fetch_max
+ // OGCG-LABEL: @atomic_fetch_max
+
+ return __atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_max_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_max_fetch
+ // LLVM-LABEL: @atomic_max_fetch
+ // OGCG-LABEL: @atomic_max_fetch
+
+ return __atomic_max_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw max ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[OLD_GREATER:.+]] = icmp sgt i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: %[[RES:.+]] = select i1 %[[OLD_GREATER]], i32 %[[OLD]], i32 %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw max ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[OLD_GREATER:.+]] = icmp sgt i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: %[[RES:.+]] = select i1 %[[OLD_GREATER]], i32 %[[OLD]], i32 %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_max(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_max
+ // LLVM-LABEL: @c11_atomic_fetch_max
+ // OGCG-LABEL: @c11_atomic_fetch_max
+
+ return __c11_atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw max ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_fetch_max_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_fetch_max_fp
+ // LLVM-LABEL: @atomic_fetch_max_fp
+ // OGCG-LABEL: @atomic_fetch_max_fp
+
+ return __atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float atomic_max_fetch_fp(float *ptr, float value) {
+ // CIR-LABEL: @atomic_max_fetch_fp
+ // LLVM-LABEL: @atomic_max_fetch_fp
+ // OGCG-LABEL: @atomic_max_fetch_fp
+
+ return __atomic_max_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[OLD:.+]] = atomicrmw fmax ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = call float @llvm.maxnum.f32(float %[[OLD]], float %[[VAL]])
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw fmax ptr %{{.+}}, float %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = call float @llvm.maxnum.f32(float %[[OLD]], float %[[VAL]])
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+float c11_atomic_fetch_max_fp(_Atomic(float) *ptr, float value) {
+ // CIR-LABEL: @c11_atomic_fetch_max_fp
+ // LLVM-LABEL: @c11_atomic_fetch_max_fp
+ // OGCG-LABEL: @c11_atomic_fetch_max_fp
+
+ return __c11_atomic_fetch_max(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch max seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+
+ // LLVM: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw fmax ptr %{{.+}}, float %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store float %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_and(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_and
+ // LLVM-LABEL: @atomic_fetch_and
+ // OGCG-LABEL: @atomic_fetch_and
+
+ return __atomic_fetch_and(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch and seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_and_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_and_fetch
+ // LLVM-LABEL: @atomic_and_fetch
+ // OGCG-LABEL: @atomic_and_fetch
+
+ return __atomic_and_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch and seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw and ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw and ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_and(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_and
+ // LLVM-LABEL: @c11_atomic_fetch_and
+ // OGCG-LABEL: @c11_atomic_fetch_and
+
+ return __c11_atomic_fetch_and(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch and seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw and ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_or(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_or
+ // LLVM-LABEL: @atomic_fetch_or
+ // OGCG-LABEL: @atomic_fetch_or
+
+ return __atomic_fetch_or(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch or seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_or_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_or_fetch
+ // LLVM-LABEL: @atomic_or_fetch
+ // OGCG-LABEL: @atomic_or_fetch
+
+ return __atomic_or_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch or seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw or ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = or i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw or ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = or i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_or(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_or
+ // LLVM-LABEL: @c11_atomic_fetch_or
+ // OGCG-LABEL: @c11_atomic_fetch_or
+
+ return __c11_atomic_fetch_or(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch or seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw or ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_xor(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_xor
+ // LLVM-LABEL: @atomic_fetch_xor
+ // OGCG-LABEL: @atomic_fetch_xor
+
+ return __atomic_fetch_xor(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch xor seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_xor_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_xor_fetch
+ // LLVM-LABEL: @atomic_xor_fetch
+ // OGCG-LABEL: @atomic_xor_fetch
+
+ return __atomic_xor_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch xor seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw xor ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[RES:.+]] = xor i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw xor ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[RES:.+]] = xor i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_xor(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_xor
+ // LLVM-LABEL: @c11_atomic_fetch_xor
+ // OGCG-LABEL: @c11_atomic_fetch_xor
+
+ return __c11_atomic_fetch_xor(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch xor seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw xor ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_fetch_nand(int *ptr, int value) {
+ // CIR-LABEL: @atomic_fetch_nand
+ // LLVM-LABEL: @atomic_fetch_nand
+ // OGCG-LABEL: @atomic_fetch_nand
+
+ return __atomic_fetch_nand(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch nand seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int atomic_nand_fetch(int *ptr, int value) {
+ // CIR-LABEL: @atomic_nand_fetch
+ // LLVM-LABEL: @atomic_nand_fetch
+ // OGCG-LABEL: @atomic_nand_fetch
+
+ return __atomic_nand_fetch(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch nand seq_cst %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[OLD:.+]] = atomicrmw nand ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // LLVM-NEXT: %[[TMP:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // LLVM-NEXT: %[[RES:.+]] = xor i32 %[[TMP]], -1
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[OLD:.+]] = atomicrmw nand ptr %{{.+}}, i32 %[[VAL:.+]] seq_cst, align 4
+ // OGCG-NEXT: %[[TMP:.+]] = and i32 %[[OLD]], %[[VAL]]
+ // OGCG-NEXT: %[[RES:.+]] = xor i32 %[[TMP]], -1
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
+
+int c11_atomic_fetch_nand(_Atomic(int) *ptr, int value) {
+ // CIR-LABEL: @c11_atomic_fetch_nand
+ // LLVM-LABEL: @c11_atomic_fetch_nand
+ // OGCG-LABEL: @c11_atomic_fetch_nand
+
+ return __c11_atomic_fetch_nand(ptr, value, __ATOMIC_SEQ_CST);
+ // CIR: %{{.+}} = cir.atomic.fetch nand seq_cst fetch_first %{{.+}}, %{{.+}} : (!cir.ptr<!s32i>, !s32i) -> !s32i
+
+ // LLVM: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // LLVM-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+
+ // OGCG: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
+ // OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
+}
diff --git a/clang/test/CIR/IR/invalid-atomic.cir b/clang/test/CIR/IR/invalid-atomic.cir
new file mode 100644
index 0000000000000..e378953ee0913
--- /dev/null
+++ b/clang/test/CIR/IR/invalid-atomic.cir
@@ -0,0 +1,7 @@
+// RUN: cir-opt %s -verify-diagnostics -split-input-file
+
+cir.func @f1(%arg0: !cir.ptr<!cir.float>, %arg1: !cir.float) {
+  // expected-error @below {{only atomic add, sub, max, and min operations can operate on floating-point values}}
+ %0 = cir.atomic.fetch and seq_cst %arg0, %arg1 : (!cir.ptr<!cir.float>, !cir.float) -> !cir.float
+ cir.return
+}