[clang] [CIR] Upstream __sync_<OP>_and_fetch builtins (PR #168347)
Hendrik Hübner via cfe-commits
cfe-commits at lists.llvm.org
Mon Nov 17 03:15:19 PST 2025
https://github.com/HendrikHuebner created https://github.com/llvm/llvm-project/pull/168347
This PR upstreams support for several `__sync_<OP>_and_fetch` builtins, along with the helper methods they require.
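
As a quick reminder of the semantics being lowered (an illustrative, hand-written snippet, not part of the patch; `bump`, `counter`, and `delta` are made-up names): the `__sync_<OP>_and_fetch` builtins atomically apply the operation and return the new value, unlike the `__sync_fetch_and_<OP>` forms, which return the old value.

```c
// Illustrative only: not taken from this patch.
static int counter = 0;

int bump(int delta) {
  // Atomically performs counter += delta and returns the updated value.
  // __sync_fetch_and_add(&counter, delta) would instead return the value
  // held before the addition.
  return __sync_add_and_fetch(&counter, delta);
}
```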
From ca2031e7b81f82250984e781145c3386ae08dcea Mon Sep 17 00:00:00 2001
From: hhuebner <hendrik.huebner18 at gmail.com>
Date: Mon, 17 Nov 2025 11:42:01 +0100
Subject: [PATCH] [CIR] Upstream __sync_<OP>_and_fetch builtins
---
clang/lib/CIR/CodeGen/Address.h | 28 +-
clang/lib/CIR/CodeGen/CIREHScopeStack.h | 279 ++++++++++++++
clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 268 +++++++++++++
clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 23 ++
clang/lib/CIR/CodeGen/CIRGenFunction.h | 5 +
clang/test/CIR/CodeGen/atomic.c | 491 ++++++++++++++++++++++++
6 files changed, 1092 insertions(+), 2 deletions(-)
create mode 100644 clang/lib/CIR/CodeGen/CIREHScopeStack.h
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
index c8ce530a7b0d3..02a24a86b3c84 100644
--- a/clang/lib/CIR/CodeGen/Address.h
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -45,8 +45,12 @@ class Address {
public:
Address(mlir::Value pointer, mlir::Type elementType,
clang::CharUnits alignment)
- : pointerAndKnownNonNull(pointer, false), elementType(elementType),
- alignment(alignment) {
+ : Address(pointer, elementType, alignment, false) {}
+
+ Address(mlir::Value pointer, mlir::Type elementType,
+ clang::CharUnits alignment, bool pointerAndKnownNonNull)
+ : pointerAndKnownNonNull(pointer, pointerAndKnownNonNull),
+ elementType(elementType), alignment(alignment) {
assert(pointer && "Pointer cannot be null");
assert(elementType && "Element type cannot be null");
assert(!alignment.isZero() && "Alignment cannot be zero");
@@ -77,6 +81,13 @@ class Address {
return Address(newPtr, getElementType(), getAlignment());
}
+ /// Return address with different alignment, but same pointer and element
+ /// type.
+ Address withAlignment(clang::CharUnits newAlignment) const {
+ return Address(getPointer(), getElementType(), newAlignment,
+ isKnownNonNull());
+ }
+
/// Return address with different element type, a bitcast pointer, and
/// the same alignment.
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const;
@@ -133,6 +144,19 @@ class Address {
template <typename OpTy> OpTy getDefiningOp() const {
return mlir::dyn_cast_or_null<OpTy>(getDefiningOp());
}
+
+ /// Whether the pointer is known not to be null.
+ bool isKnownNonNull() const {
+ assert(isValid() && "Invalid address");
+ return static_cast<bool>(pointerAndKnownNonNull.getInt());
+ }
+
+ /// Set the non-null bit.
+ Address setKnownNonNull() {
+ assert(isValid() && "Invalid address");
+ pointerAndKnownNonNull.setInt(true);
+ return *this;
+ }
};
} // namespace clang::CIRGen
diff --git a/clang/lib/CIR/CodeGen/CIREHScopeStack.h b/clang/lib/CIR/CodeGen/CIREHScopeStack.h
new file mode 100644
index 0000000000000..c7b86a06339a8
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIREHScopeStack.h
@@ -0,0 +1,279 @@
+//===-- EHScopeStack.h - Stack for cleanup CIR generation -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes should be the minimum interface required for other parts of
+// CIR CodeGen to emit cleanups. The implementation is in CIRGenCleanup.cpp and
+// other implementation details that are not widely needed are in
+// CIRGenCleanup.h.
+//
+// TODO(cir): this header should be shared between LLVM and CIR codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+#define CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/lib/CodeGen/EHScopeStack.h"
+
+namespace clang::CIRGen {
+
+class CIRGenFunction;
+
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The block containing the terminator which needs to be modified
+ /// into a switch if this fixup is resolved into the current scope.
+ /// If null, LatestBranch points directly to the destination.
+ mlir::Block *optimisticBranchBlock = nullptr;
+
+ /// The ultimate destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ mlir::Block *destination = nullptr;
+
+ /// The destination index value.
+ unsigned destinationIndex = 0;
+
+ /// The initial branch of the fixup.
+ cir::BrOp initialBranch = {};
+};
+
+enum CleanupKind : unsigned {
+ /// Denotes a cleanup that should run when a scope is exited using exceptional
+ /// control flow (a throw statement leading to stack unwinding, etc.).
+ EHCleanup = 0x1,
+
+ /// Denotes a cleanup that should run when a scope is exited using normal
+ /// control flow (falling off the end of the scope, return, goto, ...).
+ NormalCleanup = 0x2,
+
+ NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+ LifetimeMarker = 0x8,
+ NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup,
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+ friend class CIRGenFunction;
+
+public:
+ // TODO(ogcg): Switch to alignof(uint64_t) instead of 8
+ enum { ScopeStackAlignment = 8 };
+
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from startOfData to endOfBuffer.
+ ptrdiff_t size = -1;
+
+ explicit stable_iterator(ptrdiff_t size) : size(size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() = default;
+
+ bool isValid() const { return size >= 0; }
+
+ /// Returns true if this scope encloses I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool encloses(stable_iterator other) const { return size <= other.size; }
+
+ /// Returns true if this scope strictly encloses I: that is,
+ /// if it encloses I and is not I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool strictlyEncloses(stable_iterator I) const { return size < I.size; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.size == B.size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.size != B.size;
+ }
+ };
+
+ /// Information for lazily generating a cleanup. Subclasses must be
+ /// POD-like: cleanups will not be destructed, and they will be
+ /// allocated on the cleanup stack and freely copied and moved
+ /// around.
+ ///
+ /// Cleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class LLVM_MOVABLE_POLYMORPHIC_TYPE Cleanup {
+ // Anchor the construction vtable.
+ virtual void anchor();
+
+ public:
+ Cleanup(const Cleanup &) = default;
+ Cleanup(Cleanup &&) {}
+ Cleanup() = default;
+
+ virtual ~Cleanup() = default;
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ ///
+ virtual void emit(CIRGenFunction &cgf) = 0;
+ };
+
+private:
+ // The implementation for this class is in CIRGenCleanup.h and
+ // CIRGenCleanup.cpp; the definition is here because it's used as a
+ // member of CIRGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ std::unique_ptr<char[]> startOfBuffer;
+
+ /// The end of the buffer.
+ char *endOfBuffer = nullptr;
+
+ /// The first valid entry in the buffer.
+ char *startOfData = nullptr;
+
+ /// The innermost normal cleanup on the stack.
+ stable_iterator innermostNormalCleanup = stable_end();
+
+ /// The innermost EH scope on the stack.
+ stable_iterator innermostEHScope = stable_end();
+
+ /// The CGF this stack belongs to.
+ CIRGenFunction *cgf = nullptr;
+
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ llvm::SmallVector<BranchFixup> branchFixups;
+
+ // This class uses a custom allocator for maximum efficiency because cleanups
+ // are allocated and freed very frequently. It's basically a bump pointer
+ // allocator, but we can't use LLVM's BumpPtrAllocator because we use offsets
+ // into the buffer as stable iterators.
+ char *allocate(size_t size);
+ void deallocate(size_t size);
+
+ void *pushCleanup(CleanupKind kind, size_t dataSize);
+
+public:
+ EHScopeStack() = default;
+ ~EHScopeStack() = default;
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class... As> void pushCleanup(CleanupKind kind, As... a) {
+ static_assert(alignof(T) <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
+ void *buffer = pushCleanup(kind, sizeof(T));
+ [[maybe_unused]] Cleanup *obj = new (buffer) T(a...);
+ }
+
+ void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
+
+ /// Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
+ void popCleanup();
+
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned numHandlers);
+
+ /// Pops a catch scope off the stack. This is private to CIRGenException.cpp.
+ void popCatch();
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return startOfData == endOfBuffer; }
+
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return innermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return innermostNormalCleanup;
+ }
+ stable_iterator getInnermostActiveNormalCleanup() const;
+
+ stable_iterator getInnermostEHScope() const { return innermostEHScope; }
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Returns an iterator pointing to the outermost EH scope.
+ iterator end() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(endOfBuffer - startOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() { return stable_iterator(0); }
+
+ /// Turn a stable reference to a scope depth into an unstable pointer
+ /// to the EH stack.
+ iterator find(stable_iterator savePoint) const;
+
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ branchFixups.push_back(BranchFixup());
+ return branchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return branchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned i) {
+ assert(i < getNumBranchFixups());
+ return branchFixups[i];
+ }
+
+ /// Pops lazily-removed fixups from the end of the list. This
+ /// should only be called by procedures which have just popped a
+ /// cleanup or resolved one or more fixups.
+ void popNullFixups();
+};
+
+} // namespace clang::CIRGen
+
+#endif // CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 77f19343653db..a0a350ebe031c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/CIR/Dialect/IR/CIRTypes.h"
#include "clang/CIR/MissingFeatures.h"
#include "llvm/Support/ErrorHandling.h"
@@ -58,6 +59,107 @@ static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e,
return RValue::get(result);
}
+/// Emit the conversions required to turn the given value into an
+/// integer of the given size.
+static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
+ cir::IntType intType) {
+ v = cgf.emitToMemory(v, t);
+
+ if (isa<cir::PointerType>(v.getType()))
+ return cgf.getBuilder().createPtrToInt(v, intType);
+
+ assert(v.getType() == intType);
+ return v;
+}
+
+static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
+ mlir::Type resultType) {
+ v = cgf.emitFromMemory(v, t);
+
+ if (isa<cir::PointerType>(resultType))
+ return cgf.getBuilder().createIntToPtr(v, resultType);
+
+ assert(v.getType() == resultType);
+ return v;
+}
+
+static Address checkAtomicAlignment(CIRGenFunction &cgf, const CallExpr *e) {
+ ASTContext &astContext = cgf.getContext();
+ Address ptr = cgf.emitPointerWithAlignment(e->getArg(0));
+ unsigned bytes =
+ isa<cir::PointerType>(ptr.getElementType())
+ ? astContext.getTypeSizeInChars(astContext.VoidPtrTy).getQuantity()
+ : cgf.cgm.getDataLayout().getTypeSizeInBits(ptr.getElementType()) / 8;
+ unsigned align = ptr.getAlignment().getQuantity();
+ if (align % bytes != 0) {
+ DiagnosticsEngine &diags = cgf.cgm.getDiags();
+ diags.Report(e->getBeginLoc(), diag::warn_sync_op_misaligned);
+ // Force address to be at least naturally-aligned.
+ return ptr.withAlignment(CharUnits::fromQuantity(bytes));
+ }
+ return ptr;
+}
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID
+/// and the expression node.
+static mlir::Value makeBinaryAtomicValue(
+ CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr,
+ mlir::Value *neededValP = nullptr,
+ cir::MemOrder ordering = cir::MemOrder::SequentiallyConsistent) {
+
+ QualType type = expr->getType();
+ QualType ptrType = expr->getArg(0)->getType();
+
+ assert(ptrType->isPointerType());
+ assert(
+ cgf.getContext().hasSameUnqualifiedType(type, ptrType->getPointeeType()));
+ assert(cgf.getContext().hasSameUnqualifiedType(type,
+ expr->getArg(1)->getType()));
+
+ Address destAddr = checkAtomicAlignment(cgf, expr);
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+ cir::IntType intType =
+ ptrType->getPointeeType()->isUnsignedIntegerType()
+ ? builder.getUIntNTy(cgf.getContext().getTypeSize(type))
+ : builder.getSIntNTy(cgf.getContext().getTypeSize(type));
+ mlir::Value val = cgf.emitScalarExpr(expr->getArg(1));
+ mlir::Type valueType = val.getType();
+ val = emitToInt(cgf, val, type, intType);
+
+ // This output argument is needed by the <binop>_and_fetch builtins, which
+ // must recompute the result of the operation after the atomic update: the
+ // `AtomicFetch` operation only updates the memory location and returns the
+ // old value.
+ if (neededValP) {
+ *neededValP = val;
+ }
+
+ auto rmwi = cir::AtomicFetchOp::create(
+ builder, cgf.getLoc(expr->getSourceRange()), destAddr.emitRawPointer(),
+ val, kind, ordering, /*isVolatile=*/false,
+ /*fetchFirst=*/true);
+ return emitFromInt(cgf, rmwi->getResult(0), type, valueType);
+}
+
+static RValue emitBinaryAtomicPost(CIRGenFunction &cgf,
+ cir::AtomicFetchKind atomicOpkind,
+ const CallExpr *e, cir::BinOpKind binopKind,
+ bool invert = false) {
+ mlir::Value val;
+ clang::QualType typ = e->getType();
+ mlir::Value result = makeBinaryAtomicValue(cgf, atomicOpkind, e, &val);
+ clang::CIRGen::CIRGenBuilderTy &builder = cgf.getBuilder();
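+ // makeBinaryAtomicValue returned the old value; re-apply the binop to get the
+ // post-operation result (and for nand, invert it to yield ~(old & val)).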
+ result = cir::BinOp::create(builder, result.getLoc(), binopKind, result, val);
+
+ if (invert) {
+ result = cir::UnaryOp::create(builder, result.getLoc(),
+ cir::UnaryOpKind::Not, result);
+ }
+
+ result = emitFromInt(cgf, result, typ, val.getType());
+ return RValue::get(result);
+}
+
RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
mlir::Value input = emitScalarExpr(e->getArg(0));
mlir::Value amount = emitScalarExpr(e->getArg(1));
@@ -520,6 +622,172 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
return RValue::get(nullptr);
}
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_nand:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_nand_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_swap:
+ llvm_unreachable("Shouldn't make it through sema");
+
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ llvm_unreachable("BI__sync_fetch_and_add NYI");
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ llvm_unreachable("BI__sync_fetch_and_sub NYI");
+
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ llvm_unreachable("BI__sync_fetch_and_or NYI");
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ llvm_unreachable("BI__sync_fetch_and_and NYI");
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ llvm_unreachable("BI__sync_fetch_and_xor NYI");
+ case Builtin::BI__sync_fetch_and_nand_1:
+ case Builtin::BI__sync_fetch_and_nand_2:
+ case Builtin::BI__sync_fetch_and_nand_4:
+ case Builtin::BI__sync_fetch_and_nand_8:
+ case Builtin::BI__sync_fetch_and_nand_16:
+ llvm_unreachable("BI__sync_fetch_and_nand NYI");
+
+ // Clang extensions: not overloaded yet.
+ case Builtin::BI__sync_fetch_and_min:
+ llvm_unreachable("BI__sync_fetch_and_min NYI");
+ case Builtin::BI__sync_fetch_and_max:
+ llvm_unreachable("BI__sync_fetch_and_max NYI");
+ case Builtin::BI__sync_fetch_and_umin:
+ llvm_unreachable("BI__sync_fetch_and_umin NYI");
+ case Builtin::BI__sync_fetch_and_umax:
+ llvm_unreachable("BI__sync_fetch_and_umax NYI");
+
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Add, e,
+ cir::BinOpKind::Add);
+
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Sub, e,
+ cir::BinOpKind::Sub);
+
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::And, e,
+ cir::BinOpKind::And);
+
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Or, e,
+ cir::BinOpKind::Or);
+
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Xor, e,
+ cir::BinOpKind::Xor);
+
+ case Builtin::BI__sync_nand_and_fetch_1:
+ case Builtin::BI__sync_nand_and_fetch_2:
+ case Builtin::BI__sync_nand_and_fetch_4:
+ case Builtin::BI__sync_nand_and_fetch_8:
+ case Builtin::BI__sync_nand_and_fetch_16:
+ return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Nand, e,
+ cir::BinOpKind::And, true);
+
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16:
+ llvm_unreachable("BI__sync_val_compare_and_swap NYI");
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16:
+ llvm_unreachable("BI__sync_bool_compare_and_swap NYI");
+ case Builtin::BI__sync_swap_1:
+ case Builtin::BI__sync_swap_2:
+ case Builtin::BI__sync_swap_4:
+ case Builtin::BI__sync_swap_8:
+ case Builtin::BI__sync_swap_16:
+ llvm_unreachable("BI__sync_swap_1 like NYI");
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ llvm_unreachable("BI__sync_lock_test_and_set_1 like NYI");
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16:
+ llvm_unreachable("BI__sync_lock_release_1 like NYI");
+ case Builtin::BI__sync_synchronize:
+ llvm_unreachable("BI__sync_synchronize NYI");
+ case Builtin::BI__builtin_nontemporal_load:
+ llvm_unreachable("BI__builtin_nontemporal_load NYI");
+ case Builtin::BI__builtin_nontemporal_store:
+ llvm_unreachable("BI__builtin_nontemporal_store NYI");
+ case Builtin::BI__c11_atomic_is_lock_free:
+ llvm_unreachable("BI__c11_atomic_is_lock_free NYI");
+ case Builtin::BI__atomic_is_lock_free:
+ llvm_unreachable("BI__atomic_is_lock_free NYI");
+ case Builtin::BI__atomic_test_and_set:
+ llvm_unreachable("BI__atomic_test_and_set NYI");
+ case Builtin::BI__atomic_clear:
+ llvm_unreachable("BI__atomic_clear NYI");
+ case Builtin::BI__atomic_thread_fence:
+ llvm_unreachable("BI__atomic_thread_fence NYI");
+ case Builtin::BI__atomic_signal_fence:
+ llvm_unreachable("BI__atomic_signal_fence NYI");
+ case Builtin::BI__c11_atomic_thread_fence:
+ case Builtin::BI__c11_atomic_signal_fence:
+ llvm_unreachable("BI__c11_atomic_thread_fence like NYI");
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 91a59d60fcb3e..ac633a012d592 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -553,6 +553,14 @@ mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
return value;
}
+mlir::Value CIRGenFunction::emitFromMemory(mlir::Value value, QualType ty) {
+ if (!ty->isBooleanType() && hasBooleanRepresentation(ty)) {
+ llvm_unreachable("NYI");
+ }
+
+ return value;
+}
+
void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
bool isInit) {
if (lvalue.getType()->isConstantMatrixType()) {
@@ -1921,6 +1929,21 @@ RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
return callResult;
}
+// TODO: this can also be abstracted into common AST helpers
+bool CIRGenFunction::hasBooleanRepresentation(QualType type) {
+
+ if (type->isBooleanType())
+ return true;
+
+ if (const EnumType *enumType = type->getAs<EnumType>())
+ return enumType->getDecl()->getIntegerType()->isBooleanType();
+
+ if (const AtomicType *atomicType = type->getAs<AtomicType>())
+ return hasBooleanRepresentation(atomicType->getValueType());
+
+ return false;
+}
+
CIRGenCallee CIRGenFunction::emitCallee(const clang::Expr *e) {
e = e->IgnoreParens();
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 00f289bcd1bb2..be6facfd77e04 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -1370,6 +1370,7 @@ class CIRGenFunction : public CIRGenTypeCache {
RValue emitCallExpr(const clang::CallExpr *e,
ReturnValueSlot returnValue = ReturnValueSlot());
LValue emitCallExprLValue(const clang::CallExpr *e);
+ bool hasBooleanRepresentation(QualType type);
CIRGenCallee emitCallee(const clang::Expr *e);
template <typename T>
@@ -1756,6 +1757,10 @@ class CIRGenFunction : public CIRGenTypeCache {
/// to conserve the high level information.
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty);
+ /// EmitFromMemory - Change a scalar value from its memory
+ /// representation to its value representation.
+ mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty);
+
/// Emit a trap instruction, which is used to abort the program in an abnormal
/// way, usually for debugging purposes.
/// \p createNewBlock indicates whether to create a new block for the IR
diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
index d5bea8446d730..fd814459d614b 100644
--- a/clang/test/CIR/CodeGen/atomic.c
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -1133,3 +1133,494 @@ int c11_atomic_fetch_nand(_Atomic(int) *ptr, int value) {
// OGCG: %[[RES:.+]] = atomicrmw nand ptr %{{.+}}, i32 %{{.+}} seq_cst, align 4
// OGCG-NEXT: store i32 %[[RES]], ptr %{{.+}}, align 4
}
+
+// CHECK-LABEL: @test_op_and_fetch
+// LLVM-LABEL: @test_op_and_fetch
+void test_op_and_fetch() {
+ signed char sc;
+ unsigned char uc;
+ signed short ss;
+ unsigned short us;
+ signed int si;
+ unsigned int ui;
+ signed long long sll;
+ unsigned long long ull;
+
+ // CHECK: [[VAL0:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s8i
+ // CHECK: [[RES0:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!s8i>, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i
+ // CHECK: [[RET0:%.*]] = cir.binop(add, [[RES0]], [[VAL0]]) : !s8i
+ // LLVM: [[VAL0:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES0:%.*]] = atomicrmw add ptr %{{.*}}, i8 [[VAL0]] seq_cst, align 1
+ // LLVM: [[RET0:%.*]] = add i8 [[RES0]], [[VAL0]]
+ // LLVM: store i8 [[RET0]], ptr %{{.*}}, align 1
+ sc = __sync_add_and_fetch(&sc, uc);
+
+ // CHECK: [[RES1:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!u8i>, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i
+ // CHECK: [[RET1:%.*]] = cir.binop(add, [[RES1]], [[VAL1]]) : !u8i
+ // LLVM: [[VAL1:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES1:%.*]] = atomicrmw add ptr %{{.*}}, i8 [[VAL1]] seq_cst, align 1
+ // LLVM: [[RET1:%.*]] = add i8 [[RES1]], [[VAL1]]
+ // LLVM: store i8 [[RET1]], ptr %{{.*}}, align 1
+ uc = __sync_add_and_fetch(&uc, uc);
+
+ // CHECK: [[VAL2:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s16i
+ // CHECK: [[RES2:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!s16i>, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i
+ // CHECK: [[RET2:%.*]] = cir.binop(add, [[RES2]], [[VAL2]]) : !s16i
+ // LLVM: [[VAL2:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
+ // LLVM: [[RES2:%.*]] = atomicrmw add ptr %{{.*}}, i16 [[CONV2]] seq_cst, align 2
+ // LLVM: [[RET2:%.*]] = add i16 [[RES2]], [[CONV2]]
+ // LLVM: store i16 [[RET2]], ptr %{{.*}}, align 2
+ ss = __sync_add_and_fetch(&ss, uc);
+
+ // CHECK: [[VAL3:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u16i
+ // CHECK: [[RES3:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!u16i>, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i
+ // CHECK: [[RET3:%.*]] = cir.binop(add, [[RES3]], [[VAL3]]) : !u16i
+ // LLVM: [[VAL3:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
+ // LLVM: [[RES3:%.*]] = atomicrmw add ptr %{{.*}}, i16 [[CONV3]] seq_cst, align 2
+ // LLVM: [[RET3:%.*]] = add i16 [[RES3]], [[CONV3]]
+ // LLVM: store i16 [[RET3]], ptr %{{.*}}
+ us = __sync_add_and_fetch(&us, uc);
+
+ // CHECK: [[VAL4:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s32i
+ // CHECK: [[RES4:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!s32i>, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i
+ // CHECK: [[RET4:%.*]] = cir.binop(add, [[RES4]], [[VAL4]]) : !s32i
+ // LLVM: [[VAL4:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
+ // LLVM: [[RES4:%.*]] = atomicrmw add ptr %{{.*}}, i32 [[CONV4]] seq_cst, align 4
+ // LLVM: [[RET4:%.*]] = add i32 [[RES4]], [[CONV4]]
+ // LLVM: store i32 [[RET4]], ptr %{{.*}}, align 4
+ si = __sync_add_and_fetch(&si, uc);
+
+ // CHECK: [[VAL5:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u32i
+ // CHECK: [[RES5:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!u32i>, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i
+ // CHECK: [[RET5:%.*]] = cir.binop(add, [[RES5]], [[VAL5]]) : !u32i
+ // LLVM: [[VAL5:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
+ // LLVM: [[RES5:%.*]] = atomicrmw add ptr %{{.*}}, i32 [[CONV5]] seq_cst, align 4
+ // LLVM: [[RET5:%.*]] = add i32 [[RES5]], [[CONV5]]
+ // LLVM: store i32 [[RET5]], ptr %{{.*}}, align 4
+ ui = __sync_add_and_fetch(&ui, uc);
+
+ // CHECK: [[VAL6:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s64i
+ // CHECK: [[RES6:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!s64i>, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i
+ // CHECK: [[RET6:%.*]] = cir.binop(add, [[RES6]], [[VAL6]]) : !s64i
+ // LLVM: [[VAL6:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
+ // LLVM: [[RES6:%.*]] = atomicrmw add ptr %{{.*}}, i64 [[CONV6]] seq_cst, align 8
+ // LLVM: [[RET6:%.*]] = add i64 [[RES6]], [[CONV6]]
+ // LLVM: store i64 [[RET6]], ptr %{{.*}}, align 8
+ sll = __sync_add_and_fetch(&sll, uc);
+
+ // CHECK: [[VAL7:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u64i
+ // CHECK: [[RES7:%.*]] = cir.atomic.fetch(add, {{%.*}} : !cir.ptr<!u64i>, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i
+ // CHECK: [[RET7:%.*]] = cir.binop(add, [[RES7]], [[VAL7]]) : !u64i
+ // LLVM: [[VAL7:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
+ // LLVM: [[RES7:%.*]] = atomicrmw add ptr %{{.*}}, i64 [[CONV7]] seq_cst, align 8
+ // LLVM: [[RET7:%.*]] = add i64 [[RES7]], [[CONV7]]
+ // LLVM: store i64 [[RET7]], ptr %{{.*}}, align 8
+ ull = __sync_add_and_fetch(&ull, uc);
+
+ // CHECK: [[VAL0:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s8i
+ // CHECK: [[RES0:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!s8i>, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i
+ // CHECK: [[RET0:%.*]] = cir.binop(sub, [[RES0]], [[VAL0]]) : !s8i
+ // LLVM: [[VAL0:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES0:%.*]] = atomicrmw sub ptr %{{.*}}, i8 [[VAL0]] seq_cst, align 1
+ // LLVM: [[RET0:%.*]] = sub i8 [[RES0]], [[VAL0]]
+ // LLVM: store i8 [[RET0]], ptr %{{.*}}, align 1
+ sc = __sync_sub_and_fetch(&sc, uc);
+
+ // CHECK: [[RES1:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!u8i>, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i
+ // CHECK: [[RET1:%.*]] = cir.binop(sub, [[RES1]], [[VAL1]]) : !u8i
+ // LLVM: [[VAL1:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES1:%.*]] = atomicrmw sub ptr %{{.*}}, i8 [[VAL1]] seq_cst, align 1
+ // LLVM: [[RET1:%.*]] = sub i8 [[RES1]], [[VAL1]]
+ // LLVM: store i8 [[RET1]], ptr %{{.*}}, align 1
+ uc = __sync_sub_and_fetch(&uc, uc);
+
+ // CHECK: [[VAL2:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s16i
+ // CHECK: [[RES2:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!s16i>, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i
+ // CHECK: [[RET2:%.*]] = cir.binop(sub, [[RES2]], [[VAL2]]) : !s16i
+ // LLVM: [[VAL2:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
+ // LLVM: [[RES2:%.*]] = atomicrmw sub ptr %{{.*}}, i16 [[CONV2]] seq_cst, align 2
+ // LLVM: [[RET2:%.*]] = sub i16 [[RES2]], [[CONV2]]
+ // LLVM: store i16 [[RET2]], ptr %{{.*}}, align 2
+ ss = __sync_sub_and_fetch(&ss, uc);
+
+ // CHECK: [[VAL3:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u16i
+ // CHECK: [[RES3:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!u16i>, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i
+ // CHECK: [[RET3:%.*]] = cir.binop(sub, [[RES3]], [[VAL3]]) : !u16i
+ // LLVM: [[VAL3:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
+ // LLVM: [[RES3:%.*]] = atomicrmw sub ptr %{{.*}}, i16 [[CONV3]] seq_cst, align 2
+ // LLVM: [[RET3:%.*]] = sub i16 [[RES3]], [[CONV3]]
+ // LLVM: store i16 [[RET3]], ptr %{{.*}}
+ us = __sync_sub_and_fetch(&us, uc);
+
+ // CHECK: [[VAL4:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s32i
+ // CHECK: [[RES4:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!s32i>, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i
+ // CHECK: [[RET4:%.*]] = cir.binop(sub, [[RES4]], [[VAL4]]) : !s32i
+ // LLVM: [[VAL4:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
+ // LLVM: [[RES4:%.*]] = atomicrmw sub ptr %{{.*}}, i32 [[CONV4]] seq_cst, align 4
+ // LLVM: [[RET4:%.*]] = sub i32 [[RES4]], [[CONV4]]
+ // LLVM: store i32 [[RET4]], ptr %{{.*}}, align 4
+ si = __sync_sub_and_fetch(&si, uc);
+
+ // CHECK: [[VAL5:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u32i
+ // CHECK: [[RES5:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!u32i>, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i
+ // CHECK: [[RET5:%.*]] = cir.binop(sub, [[RES5]], [[VAL5]]) : !u32i
+ // LLVM: [[VAL5:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
+ // LLVM: [[RES5:%.*]] = atomicrmw sub ptr %{{.*}}, i32 [[CONV5]] seq_cst, align 4
+ // LLVM: [[RET5:%.*]] = sub i32 [[RES5]], [[CONV5]]
+ // LLVM: store i32 [[RET5]], ptr %{{.*}}, align 4
+ ui = __sync_sub_and_fetch(&ui, uc);
+
+ // CHECK: [[VAL6:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s64i
+ // CHECK: [[RES6:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!s64i>, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i
+ // CHECK: [[RET6:%.*]] = cir.binop(sub, [[RES6]], [[VAL6]]) : !s64i
+ // LLVM: [[VAL6:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
+ // LLVM: [[RES6:%.*]] = atomicrmw sub ptr %{{.*}}, i64 [[CONV6]] seq_cst, align 8
+ // LLVM: [[RET6:%.*]] = sub i64 [[RES6]], [[CONV6]]
+ // LLVM: store i64 [[RET6]], ptr %{{.*}}, align 8
+ sll = __sync_sub_and_fetch(&sll, uc);
+
+ // CHECK: [[VAL7:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u64i
+ // CHECK: [[RES7:%.*]] = cir.atomic.fetch(sub, {{%.*}} : !cir.ptr<!u64i>, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i
+ // CHECK: [[RET7:%.*]] = cir.binop(sub, [[RES7]], [[VAL7]]) : !u64i
+ // LLVM: [[VAL7:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
+ // LLVM: [[RES7:%.*]] = atomicrmw sub ptr %{{.*}}, i64 [[CONV7]] seq_cst, align 8
+ // LLVM: [[RET7:%.*]] = sub i64 [[RES7]], [[CONV7]]
+ // LLVM: store i64 [[RET7]], ptr %{{.*}}, align 8
+ ull = __sync_sub_and_fetch(&ull, uc);
+
+ // CHECK: [[VAL0:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s8i
+ // CHECK: [[RES0:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!s8i>, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i
+ // CHECK: [[RET0:%.*]] = cir.binop(and, [[RES0]], [[VAL0]]) : !s8i
+ // LLVM: [[VAL0:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES0:%.*]] = atomicrmw and ptr %{{.*}}, i8 [[VAL0]] seq_cst, align 1
+ // LLVM: [[RET0:%.*]] = and i8 [[RES0]], [[VAL0]]
+ // LLVM: store i8 [[RET0]], ptr %{{.*}}, align 1
+ sc = __sync_and_and_fetch(&sc, uc);
+
+ // CHECK: [[RES1:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!u8i>, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i
+ // CHECK: [[RET1:%.*]] = cir.binop(and, [[RES1]], [[VAL1]]) : !u8i
+ // LLVM: [[VAL1:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES1:%.*]] = atomicrmw and ptr %{{.*}}, i8 [[VAL1]] seq_cst, align 1
+ // LLVM: [[RET1:%.*]] = and i8 [[RES1]], [[VAL1]]
+ // LLVM: store i8 [[RET1]], ptr %{{.*}}, align 1
+ uc = __sync_and_and_fetch(&uc, uc);
+
+ // CHECK: [[VAL2:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s16i
+ // CHECK: [[RES2:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!s16i>, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i
+ // CHECK: [[RET2:%.*]] = cir.binop(and, [[RES2]], [[VAL2]]) : !s16i
+ // LLVM: [[VAL2:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
+ // LLVM: [[RES2:%.*]] = atomicrmw and ptr %{{.*}}, i16 [[CONV2]] seq_cst, align 2
+ // LLVM: [[RET2:%.*]] = and i16 [[RES2]], [[CONV2]]
+ // LLVM: store i16 [[RET2]], ptr %{{.*}}, align 2
+ ss = __sync_and_and_fetch(&ss, uc);
+
+ // CHECK: [[VAL3:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u16i
+ // CHECK: [[RES3:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!u16i>, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i
+ // CHECK: [[RET3:%.*]] = cir.binop(and, [[RES3]], [[VAL3]]) : !u16i
+ // LLVM: [[VAL3:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
+ // LLVM: [[RES3:%.*]] = atomicrmw and ptr %{{.*}}, i16 [[CONV3]] seq_cst, align 2
+ // LLVM: [[RET3:%.*]] = and i16 [[RES3]], [[CONV3]]
+ // LLVM: store i16 [[RET3]], ptr %{{.*}}
+ us = __sync_and_and_fetch(&us, uc);
+
+ // CHECK: [[VAL4:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s32i
+ // CHECK: [[RES4:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!s32i>, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i
+ // CHECK: [[RET4:%.*]] = cir.binop(and, [[RES4]], [[VAL4]]) : !s32i
+ // LLVM: [[VAL4:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
+ // LLVM: [[RES4:%.*]] = atomicrmw and ptr %{{.*}}, i32 [[CONV4]] seq_cst, align 4
+ // LLVM: [[RET4:%.*]] = and i32 [[RES4]], [[CONV4]]
+ // LLVM: store i32 [[RET4]], ptr %{{.*}}, align 4
+ si = __sync_and_and_fetch(&si, uc);
+
+ // CHECK: [[VAL5:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u32i
+ // CHECK: [[RES5:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!u32i>, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i
+ // CHECK: [[RET5:%.*]] = cir.binop(and, [[RES5]], [[VAL5]]) : !u32i
+ // LLVM: [[VAL5:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
+ // LLVM: [[RES5:%.*]] = atomicrmw and ptr %{{.*}}, i32 [[CONV5]] seq_cst, align 4
+ // LLVM: [[RET5:%.*]] = and i32 [[RES5]], [[CONV5]]
+ // LLVM: store i32 [[RET5]], ptr %{{.*}}, align 4
+ ui = __sync_and_and_fetch(&ui, uc);
+
+ // CHECK: [[VAL6:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s64i
+ // CHECK: [[RES6:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!s64i>, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i
+ // CHECK: [[RET6:%.*]] = cir.binop(and, [[RES6]], [[VAL6]]) : !s64i
+ // LLVM: [[VAL6:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
+ // LLVM: [[RES6:%.*]] = atomicrmw and ptr %{{.*}}, i64 [[CONV6]] seq_cst, align 8
+ // LLVM: [[RET6:%.*]] = and i64 [[RES6]], [[CONV6]]
+ // LLVM: store i64 [[RET6]], ptr %{{.*}}, align 8
+ sll = __sync_and_and_fetch(&sll, uc);
+
+ // CHECK: [[VAL7:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u64i
+ // CHECK: [[RES7:%.*]] = cir.atomic.fetch(and, {{%.*}} : !cir.ptr<!u64i>, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i
+ // CHECK: [[RET7:%.*]] = cir.binop(and, [[RES7]], [[VAL7]]) : !u64i
+ // LLVM: [[VAL7:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
+ // LLVM: [[RES7:%.*]] = atomicrmw and ptr %{{.*}}, i64 [[CONV7]] seq_cst, align 8
+ // LLVM: [[RET7:%.*]] = and i64 [[RES7]], [[CONV7]]
+ // LLVM: store i64 [[RET7]], ptr %{{.*}}, align 8
+ ull = __sync_and_and_fetch(&ull, uc);
+
+ // CHECK: [[VAL0:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s8i
+ // CHECK: [[RES0:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!s8i>, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i
+ // CHECK: [[RET0:%.*]] = cir.binop(or, [[RES0]], [[VAL0]]) : !s8i
+ // LLVM: [[VAL0:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES0:%.*]] = atomicrmw or ptr %{{.*}}, i8 [[VAL0]] seq_cst, align 1
+ // LLVM: [[RET0:%.*]] = or i8 [[RES0]], [[VAL0]]
+ // LLVM: store i8 [[RET0]], ptr %{{.*}}, align 1
+ sc = __sync_or_and_fetch(&sc, uc);
+
+ // CHECK: [[RES1:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!u8i>, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i
+ // CHECK: [[RET1:%.*]] = cir.binop(or, [[RES1]], [[VAL1]]) : !u8i
+ // LLVM: [[VAL1:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES1:%.*]] = atomicrmw or ptr %{{.*}}, i8 [[VAL1]] seq_cst, align 1
+ // LLVM: [[RET1:%.*]] = or i8 [[RES1]], [[VAL1]]
+ // LLVM: store i8 [[RET1]], ptr %{{.*}}, align 1
+ uc = __sync_or_and_fetch(&uc, uc);
+
+ // CHECK: [[VAL2:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s16i
+ // CHECK: [[RES2:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!s16i>, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i
+ // CHECK: [[RET2:%.*]] = cir.binop(or, [[RES2]], [[VAL2]]) : !s16i
+ // LLVM: [[VAL2:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
+ // LLVM: [[RES2:%.*]] = atomicrmw or ptr %{{.*}}, i16 [[CONV2]] seq_cst, align 2
+ // LLVM: [[RET2:%.*]] = or i16 [[RES2]], [[CONV2]]
+ // LLVM: store i16 [[RET2]], ptr %{{.*}}, align 2
+ ss = __sync_or_and_fetch(&ss, uc);
+
+ // CHECK: [[VAL3:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u16i
+ // CHECK: [[RES3:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!u16i>, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i
+ // CHECK: [[RET3:%.*]] = cir.binop(or, [[RES3]], [[VAL3]]) : !u16i
+ // LLVM: [[VAL3:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
+ // LLVM: [[RES3:%.*]] = atomicrmw or ptr %{{.*}}, i16 [[CONV3]] seq_cst, align 2
+ // LLVM: [[RET3:%.*]] = or i16 [[RES3]], [[CONV3]]
+ // LLVM: store i16 [[RET3]], ptr %{{.*}}
+ us = __sync_or_and_fetch(&us, uc);
+
+ // CHECK: [[VAL4:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s32i
+ // CHECK: [[RES4:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!s32i>, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i
+ // CHECK: [[RET4:%.*]] = cir.binop(or, [[RES4]], [[VAL4]]) : !s32i
+ // LLVM: [[VAL4:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
+ // LLVM: [[RES4:%.*]] = atomicrmw or ptr %{{.*}}, i32 [[CONV4]] seq_cst, align 4
+ // LLVM: [[RET4:%.*]] = or i32 [[RES4]], [[CONV4]]
+ // LLVM: store i32 [[RET4]], ptr %{{.*}}, align 4
+ si = __sync_or_and_fetch(&si, uc);
+
+ // CHECK: [[VAL5:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u32i
+ // CHECK: [[RES5:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!u32i>, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i
+ // CHECK: [[RET5:%.*]] = cir.binop(or, [[RES5]], [[VAL5]]) : !u32i
+ // LLVM: [[VAL5:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
+ // LLVM: [[RES5:%.*]] = atomicrmw or ptr %{{.*}}, i32 [[CONV5]] seq_cst, align 4
+ // LLVM: [[RET5:%.*]] = or i32 [[RES5]], [[CONV5]]
+ // LLVM: store i32 [[RET5]], ptr %{{.*}}, align 4
+ ui = __sync_or_and_fetch(&ui, uc);
+
+ // CHECK: [[VAL6:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s64i
+ // CHECK: [[RES6:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!s64i>, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i
+ // CHECK: [[RET6:%.*]] = cir.binop(or, [[RES6]], [[VAL6]]) : !s64i
+ // LLVM: [[VAL6:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
+ // LLVM: [[RES6:%.*]] = atomicrmw or ptr %{{.*}}, i64 [[CONV6]] seq_cst, align 8
+ // LLVM: [[RET6:%.*]] = or i64 [[RES6]], [[CONV6]]
+ // LLVM: store i64 [[RET6]], ptr %{{.*}}, align 8
+ sll = __sync_or_and_fetch(&sll, uc);
+
+ // CHECK: [[VAL7:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u64i
+ // CHECK: [[RES7:%.*]] = cir.atomic.fetch(or, {{%.*}} : !cir.ptr<!u64i>, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i
+ // CHECK: [[RET7:%.*]] = cir.binop(or, [[RES7]], [[VAL7]]) : !u64i
+ // LLVM: [[VAL7:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
+ // LLVM: [[RES7:%.*]] = atomicrmw or ptr %{{.*}}, i64 [[CONV7]] seq_cst, align 8
+ // LLVM: [[RET7:%.*]] = or i64 [[RES7]], [[CONV7]]
+ // LLVM: store i64 [[RET7]], ptr %{{.*}}, align 8
+ ull = __sync_or_and_fetch(&ull, uc);
+
+ // CHECK: [[VAL0:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s8i
+ // CHECK: [[RES0:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!s8i>, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i
+ // CHECK: [[RET0:%.*]] = cir.binop(xor, [[RES0]], [[VAL0]]) : !s8i
+ // LLVM: [[VAL0:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES0:%.*]] = atomicrmw xor ptr %{{.*}}, i8 [[VAL0]] seq_cst, align 1
+ // LLVM: [[RET0:%.*]] = xor i8 [[RES0]], [[VAL0]]
+ // LLVM: store i8 [[RET0]], ptr %{{.*}}, align 1
+ sc = __sync_xor_and_fetch(&sc, uc);
+
+ // CHECK: [[RES1:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!u8i>, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i
+ // CHECK: [[RET1:%.*]] = cir.binop(xor, [[RES1]], [[VAL1]]) : !u8i
+ // LLVM: [[VAL1:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES1:%.*]] = atomicrmw xor ptr %{{.*}}, i8 [[VAL1]] seq_cst, align 1
+ // LLVM: [[RET1:%.*]] = xor i8 [[RES1]], [[VAL1]]
+ // LLVM: store i8 [[RET1]], ptr %{{.*}}, align 1
+ uc = __sync_xor_and_fetch(&uc, uc);
+
+ // CHECK: [[VAL2:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s16i
+ // CHECK: [[RES2:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!s16i>, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i
+ // CHECK: [[RET2:%.*]] = cir.binop(xor, [[RES2]], [[VAL2]]) : !s16i
+ // LLVM: [[VAL2:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
+ // LLVM: [[RES2:%.*]] = atomicrmw xor ptr %{{.*}}, i16 [[CONV2]] seq_cst, align 2
+ // LLVM: [[RET2:%.*]] = xor i16 [[RES2]], [[CONV2]]
+ // LLVM: store i16 [[RET2]], ptr %{{.*}}, align 2
+ ss = __sync_xor_and_fetch(&ss, uc);
+
+ // CHECK: [[VAL3:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u16i
+ // CHECK: [[RES3:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!u16i>, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i
+ // CHECK: [[RET3:%.*]] = cir.binop(xor, [[RES3]], [[VAL3]]) : !u16i
+ // LLVM: [[VAL3:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
+ // LLVM: [[RES3:%.*]] = atomicrmw xor ptr %{{.*}}, i16 [[CONV3]] seq_cst, align 2
+ // LLVM: [[RET3:%.*]] = xor i16 [[RES3]], [[CONV3]]
+ // LLVM: store i16 [[RET3]], ptr %{{.*}}
+ us = __sync_xor_and_fetch(&us, uc);
+
+ // CHECK: [[VAL4:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s32i
+ // CHECK: [[RES4:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!s32i>, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i
+ // CHECK: [[RET4:%.*]] = cir.binop(xor, [[RES4]], [[VAL4]]) : !s32i
+ // LLVM: [[VAL4:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
+ // LLVM: [[RES4:%.*]] = atomicrmw xor ptr %{{.*}}, i32 [[CONV4]] seq_cst, align 4
+ // LLVM: [[RET4:%.*]] = xor i32 [[RES4]], [[CONV4]]
+ // LLVM: store i32 [[RET4]], ptr %{{.*}}, align 4
+ si = __sync_xor_and_fetch(&si, uc);
+
+ // CHECK: [[VAL5:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u32i
+ // CHECK: [[RES5:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!u32i>, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i
+ // CHECK: [[RET5:%.*]] = cir.binop(xor, [[RES5]], [[VAL5]]) : !u32i
+ // LLVM: [[VAL5:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
+ // LLVM: [[RES5:%.*]] = atomicrmw xor ptr %{{.*}}, i32 [[CONV5]] seq_cst, align 4
+ // LLVM: [[RET5:%.*]] = xor i32 [[RES5]], [[CONV5]]
+ // LLVM: store i32 [[RET5]], ptr %{{.*}}, align 4
+ ui = __sync_xor_and_fetch(&ui, uc);
+
+ // CHECK: [[VAL6:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s64i
+ // CHECK: [[RES6:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!s64i>, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i
+ // CHECK: [[RET6:%.*]] = cir.binop(xor, [[RES6]], [[VAL6]]) : !s64i
+ // LLVM: [[VAL6:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
+ // LLVM: [[RES6:%.*]] = atomicrmw xor ptr %{{.*}}, i64 [[CONV6]] seq_cst, align 8
+ // LLVM: [[RET6:%.*]] = xor i64 [[RES6]], [[CONV6]]
+ // LLVM: store i64 [[RET6]], ptr %{{.*}}, align 8
+ sll = __sync_xor_and_fetch(&sll, uc);
+
+ // CHECK: [[VAL7:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u64i
+ // CHECK: [[RES7:%.*]] = cir.atomic.fetch(xor, {{%.*}} : !cir.ptr<!u64i>, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i
+ // CHECK: [[RET7:%.*]] = cir.binop(xor, [[RES7]], [[VAL7]]) : !u64i
+ // LLVM: [[VAL7:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
+ // LLVM: [[RES7:%.*]] = atomicrmw xor ptr %{{.*}}, i64 [[CONV7]] seq_cst, align 8
+ // LLVM: [[RET7:%.*]] = xor i64 [[RES7]], [[CONV7]]
+ // LLVM: store i64 [[RET7]], ptr %{{.*}}, align 8
+ ull = __sync_xor_and_fetch(&ull, uc);
+
+ // CHECK: [[VAL0:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s8i
+ // CHECK: [[RES0:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr<!s8i>, [[VAL0]] : !s8i, seq_cst) fetch_first : !s8i
+ // CHECK: [[INTERM0:%.*]] = cir.binop(and, [[RES0]], [[VAL0]]) : !s8i
+ // CHECK: [[RET0:%.*]] = cir.unary(not, [[INTERM0]]) : !s8i, !s8i
+ // LLVM: [[VAL0:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES0:%.*]] = atomicrmw nand ptr %{{.*}}, i8 [[VAL0]] seq_cst, align 1
+ // LLVM: [[INTERM0:%.*]] = and i8 [[RES0]], [[VAL0]]
+ // LLVM: [[RET0:%.*]] = xor i8 [[INTERM0]], -1
+ // LLVM: store i8 [[RET0]], ptr %{{.*}}, align 1
+ sc = __sync_nand_and_fetch(&sc, uc);
+
+ // CHECK: [[RES1:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr<!u8i>, [[VAL1:%.*]] : !u8i, seq_cst) fetch_first : !u8i
+ // CHECK: [[INTERM1:%.*]] = cir.binop(and, [[RES1]], [[VAL1]]) : !u8i
+ // CHECK: [[RET1:%.*]] = cir.unary(not, [[INTERM1]]) : !u8i, !u8i
+ // LLVM: [[VAL1:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[RES1:%.*]] = atomicrmw nand ptr %{{.*}}, i8 [[VAL1]] seq_cst, align 1
+ // LLVM: [[INTERM1:%.*]] = and i8 [[RES1]], [[VAL1]]
+ // LLVM: [[RET1:%.*]] = xor i8 [[INTERM1]], -1
+ // LLVM: store i8 [[RET1]], ptr %{{.*}}, align 1
+ uc = __sync_nand_and_fetch(&uc, uc);
+
+ // CHECK: [[VAL2:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s16i
+ // CHECK: [[RES2:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr<!s16i>, [[VAL2]] : !s16i, seq_cst) fetch_first : !s16i
+ // CHECK: [[INTERM2:%.*]] = cir.binop(and, [[RES2]], [[VAL2]]) : !s16i
+ // CHECK: [[RET2:%.*]] = cir.unary(not, [[INTERM2]]) : !s16i, !s16i
+ // LLVM: [[VAL2:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV2:%.*]] = zext i8 [[VAL2]] to i16
+ // LLVM: [[RES2:%.*]] = atomicrmw nand ptr %{{.*}}, i16 [[CONV2]] seq_cst, align 2
+ // LLVM: [[INTERM2:%.*]] = and i16 [[RES2]], [[CONV2]]
+ // LLVM: [[RET2:%.*]] = xor i16 [[INTERM2]], -1
+ // LLVM: store i16 [[RET2]], ptr %{{.*}}, align 2
+ ss = __sync_nand_and_fetch(&ss, uc);
+
+ // CHECK: [[VAL3:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u16i
+ // CHECK: [[RES3:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr<!u16i>, [[VAL3]] : !u16i, seq_cst) fetch_first : !u16i
+ // CHECK: [[INTERM3:%.*]] = cir.binop(and, [[RES3]], [[VAL3]]) : !u16i
+ // CHECK: [[RET3:%.*]] = cir.unary(not, [[INTERM3]]) : !u16i, !u16i
+ // LLVM: [[VAL3:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV3:%.*]] = zext i8 [[VAL3]] to i16
+ // LLVM: [[RES3:%.*]] = atomicrmw nand ptr %{{.*}}, i16 [[CONV3]] seq_cst, align 2
+ // LLVM: [[INTERM3:%.*]] = and i16 [[RES3]], [[CONV3]]
+ // LLVM: [[RET3:%.*]] = xor i16 [[INTERM3]], -1
+ // LLVM: store i16 [[RET3]], ptr %{{.*}}, align 2
+ us = __sync_nand_and_fetch(&us, uc);
+
+ // CHECK: [[VAL4:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s32i
+ // CHECK: [[RES4:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr<!s32i>, [[VAL4]] : !s32i, seq_cst) fetch_first : !s32i
+ // CHECK: [[INTERM4:%.*]] = cir.binop(and, [[RES4]], [[VAL4]]) : !s32i
+ // CHECK: [[RET4:%.*]] = cir.unary(not, [[INTERM4]]) : !s32i, !s32i
+ // LLVM: [[VAL4:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV4:%.*]] = zext i8 [[VAL4]] to i32
+ // LLVM: [[RES4:%.*]] = atomicrmw nand ptr %{{.*}}, i32 [[CONV4]] seq_cst, align 4
+ // LLVM: [[INTERM4:%.*]] = and i32 [[RES4]], [[CONV4]]
+ // LLVM: [[RET4:%.*]] = xor i32 [[INTERM4]], -1
+ // LLVM: store i32 [[RET4]], ptr %{{.*}}, align 4
+ si = __sync_nand_and_fetch(&si, uc);
+
+ // CHECK: [[VAL5:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u32i
+ // CHECK: [[RES5:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr<!u32i>, [[VAL5]] : !u32i, seq_cst) fetch_first : !u32i
+ // CHECK: [[INTERM5:%.*]] = cir.binop(and, [[RES5]], [[VAL5]]) : !u32i
+ // CHECK: [[RET5:%.*]] = cir.unary(not, [[INTERM5]]) : !u32i, !u32i
+ // LLVM: [[VAL5:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV5:%.*]] = zext i8 [[VAL5]] to i32
+ // LLVM: [[RES5:%.*]] = atomicrmw nand ptr %{{.*}}, i32 [[CONV5]] seq_cst, align 4
+ // LLVM: [[INTERM5:%.*]] = and i32 [[RES5]], [[CONV5]]
+ // LLVM: [[RET5:%.*]] = xor i32 [[INTERM5]], -1
+ // LLVM: store i32 [[RET5]], ptr %{{.*}}, align 4
+ ui = __sync_nand_and_fetch(&ui, uc);
+
+ // CHECK: [[VAL6:%.*]] = cir.cast integral {{%.*}} : !u8i -> !s64i
+ // CHECK: [[RES6:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr<!s64i>, [[VAL6]] : !s64i, seq_cst) fetch_first : !s64i
+ // CHECK: [[INTERM6:%.*]] = cir.binop(and, [[RES6]], [[VAL6]]) : !s64i
+ // CHECK: [[RET6:%.*]] = cir.unary(not, [[INTERM6]]) : !s64i, !s64i
+ // LLVM: [[VAL6:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV6:%.*]] = zext i8 [[VAL6]] to i64
+ // LLVM: [[RES6:%.*]] = atomicrmw nand ptr %{{.*}}, i64 [[CONV6]] seq_cst, align 8
+ // LLVM: [[INTERM6:%.*]] = and i64 [[RES6]], [[CONV6]]
+ // LLVM: [[RET6:%.*]] = xor i64 [[INTERM6]], -1
+ // LLVM: store i64 [[RET6]], ptr %{{.*}}, align 8
+ sll = __sync_nand_and_fetch(&sll, uc);
+
+ // CHECK: [[VAL7:%.*]] = cir.cast integral {{%.*}} : !u8i -> !u64i
+ // CHECK: [[RES7:%.*]] = cir.atomic.fetch(nand, {{%.*}} : !cir.ptr<!u64i>, [[VAL7]] : !u64i, seq_cst) fetch_first : !u64i
+ // CHECK: [[INTERM7:%.*]] = cir.binop(and, [[RES7]], [[VAL7]]) : !u64i
+ // CHECK: [[RET7:%.*]] = cir.unary(not, [[INTERM7]]) : !u64i, !u64i
+ // LLVM: [[VAL7:%.*]] = load i8, ptr %{{.*}}, align 1
+ // LLVM: [[CONV7:%.*]] = zext i8 [[VAL7]] to i64
+ // LLVM: [[RES7:%.*]] = atomicrmw nand ptr %{{.*}}, i64 [[CONV7]] seq_cst, align 8
+ // LLVM: [[INTERM7:%.*]] = and i64 [[RES7]], [[CONV7]]
+ // LLVM: [[RET7:%.*]] = xor i64 [[INTERM7]], -1
+ // LLVM: store i64 [[RET7]], ptr %{{.*}}, align 8
+ ull = __sync_nand_and_fetch(&ull, uc);
+}