[clang] 331a5db - [CIR] Add initial support for atomic types (#152923)

via cfe-commits <cfe-commits@lists.llvm.org>
Tue Aug 12 18:22:51 PDT 2025


Author: Sirui Mu
Date: 2025-08-13T09:22:48+08:00
New Revision: 331a5db9de0e17e4c54dfbd58ddc54a8111d3cba

URL: https://github.com/llvm/llvm-project/commit/331a5db9de0e17e4c54dfbd58ddc54a8111d3cba
DIFF: https://github.com/llvm/llvm-project/commit/331a5db9de0e17e4c54dfbd58ddc54a8111d3cba.diff

LOG: [CIR] Add initial support for atomic types (#152923)

Added: 
    clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
    clang/test/CIR/CodeGen/atomic.c

Modified: 
    clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h
    clang/include/clang/CIR/MissingFeatures.h
    clang/lib/CIR/CodeGen/CIRGenExpr.cpp
    clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
    clang/lib/CIR/CodeGen/CIRGenFunction.h
    clang/lib/CIR/CodeGen/CIRGenTypes.cpp
    clang/lib/CIR/CodeGen/CIRGenValue.h
    clang/lib/CIR/CodeGen/CMakeLists.txt
    clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp

Removed: 
    


################################################################################
diff --git a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h
index 8ef565d6afd34..a46c2679d0481 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h
+++ b/clang/include/clang/CIR/Dialect/IR/CIRDataLayout.h
@@ -34,6 +34,21 @@ class CIRDataLayout {
   void reset(mlir::DataLayoutSpecInterface spec);
 
   bool isBigEndian() const { return bigEndian; }
+
+  /// Returns the maximum number of bytes that may be overwritten by
+  /// storing the specified type.
+  ///
+  /// If Ty is a scalable vector type, the scalable property will be set and
+  /// the runtime size will be a positive integer multiple of the base size.
+  ///
+  /// For example, returns 5 for i36 and 10 for x86_fp80.
+  llvm::TypeSize getTypeStoreSize(mlir::Type ty) const {
+    llvm::TypeSize baseSize = getTypeSizeInBits(ty);
+    return {llvm::divideCeil(baseSize.getKnownMinValue(), 8),
+            baseSize.isScalable()};
+  }
+
+  llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const;
 };
 
 } // namespace cir
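
To see the arithmetic behind the new getTypeStoreSize helper: the store size is simply the bit width rounded up to whole bytes, which is where the "5 for i36 and 10 for x86_fp80" examples in the doc comment come from. A minimal standalone sketch in plain C (not part of the commit; type_store_size_bytes is a made-up name standing in for llvm::divideCeil(bits, 8)):

    #include <stdint.h>
    #include <stdio.h>

    /* Round a bit width up to whole bytes, like llvm::divideCeil(bits, 8). */
    static uint64_t type_store_size_bytes(uint64_t bits) {
      return (bits + 7) / 8;
    }

    int main(void) {
      printf("i36      -> %llu bytes\n", (unsigned long long)type_store_size_bytes(36)); /* 5  */
      printf("x86_fp80 -> %llu bytes\n", (unsigned long long)type_store_size_bytes(80)); /* 10 */
      return 0;
    }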

diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 8386a17ba7556..805c43e6d5054 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -161,6 +161,13 @@ struct MissingFeatures {
   static bool addressIsKnownNonNull() { return false; }
   static bool addressPointerAuthInfo() { return false; }
 
+  // Atomic
+  static bool atomicExpr() { return false; }
+  static bool atomicInfo() { return false; }
+  static bool atomicInfoGetAtomicPointer() { return false; }
+  static bool atomicInfoGetAtomicAddress() { return false; }
+  static bool atomicUseLibCall() { return false; }
+
   // Misc
   static bool abiArgInfo() { return false; }
   static bool addHeapAllocSiteMetadata() { return false; }
@@ -196,7 +203,9 @@ struct MissingFeatures {
   static bool ctorMemcpyizer() { return false; }
   static bool cudaSupport() { return false; }
   static bool cxxRecordStaticMembers() { return false; }
+  static bool dataLayoutTypeIsSized() { return false; }
   static bool dataLayoutTypeAllocSize() { return false; }
+  static bool dataLayoutTypeStoreSize() { return false; }
   static bool deferredCXXGlobalInit() { return false; }
   static bool ehCleanupFlags() { return false; }
   static bool ehCleanupScope() { return false; }
@@ -237,6 +246,7 @@ struct MissingFeatures {
   static bool objCBlocks() { return false; }
   static bool objCGC() { return false; }
   static bool objCLifetime() { return false; }
+  static bool openCL() { return false; }
   static bool openMP() { return false; }
   static bool opTBAA() { return false; }
   static bool peepholeProtection() { return false; }

diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
new file mode 100644
index 0000000000000..979085f037d4f
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -0,0 +1,230 @@
+//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the code for emitting atomic operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+#include "clang/CIR/MissingFeatures.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+using namespace cir;
+
+namespace {
+class AtomicInfo {
+  CIRGenFunction &cgf;
+  QualType atomicTy;
+  QualType valueTy;
+  uint64_t atomicSizeInBits = 0;
+  uint64_t valueSizeInBits = 0;
+  CharUnits atomicAlign;
+  CharUnits valueAlign;
+  TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
+  LValue lvalue;
+  mlir::Location loc;
+
+public:
+  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
+      : cgf(cgf), loc(loc) {
+    assert(!lvalue.isGlobalReg());
+    ASTContext &ctx = cgf.getContext();
+    if (lvalue.isSimple()) {
+      atomicTy = lvalue.getType();
+      if (auto *ty = atomicTy->getAs<AtomicType>())
+        valueTy = ty->getValueType();
+      else
+        valueTy = atomicTy;
+      evaluationKind = cgf.getEvaluationKind(valueTy);
+
+      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
+      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
+      uint64_t valueAlignInBits = valueTypeInfo.Align;
+      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
+      valueSizeInBits = valueTypeInfo.Width;
+      atomicSizeInBits = atomicTypeInfo.Width;
+      assert(valueSizeInBits <= atomicSizeInBits);
+      assert(valueAlignInBits <= atomicAlignInBits);
+
+      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
+      valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
+      if (lvalue.getAlignment().isZero())
+        lvalue.setAlignment(atomicAlign);
+
+      this->lvalue = lvalue;
+    } else {
+      assert(!cir::MissingFeatures::atomicInfo());
+      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
+    }
+
+    assert(!cir::MissingFeatures::atomicUseLibCall());
+  }
+
+  QualType getValueType() const { return valueTy; }
+  CharUnits getAtomicAlignment() const { return atomicAlign; }
+  TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
+  mlir::Value getAtomicPointer() const {
+    if (lvalue.isSimple())
+      return lvalue.getPointer();
+    assert(!cir::MissingFeatures::atomicInfoGetAtomicPointer());
+    return nullptr;
+  }
+  Address getAtomicAddress() const {
+    mlir::Type elemTy;
+    if (lvalue.isSimple()) {
+      elemTy = lvalue.getAddress().getElementType();
+    } else {
+      assert(!cir::MissingFeatures::atomicInfoGetAtomicAddress());
+      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
+    }
+    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
+  }
+
+  /// Is the atomic size larger than the underlying value type?
+  ///
+  /// Note that the absence of padding does not mean that atomic
+  /// objects are completely interchangeable with non-atomic
+  /// objects: we might have promoted the alignment of a type
+  /// without making it bigger.
+  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }
+
+  bool emitMemSetZeroIfNecessary() const;
+
+  /// Copy an atomic r-value into atomic-layout memory.
+  void emitCopyIntoMemory(RValue rvalue) const;
+
+  /// Project an l-value down to the value field.
+  LValue projectValue() const {
+    assert(lvalue.isSimple());
+    Address addr = getAtomicAddress();
+    if (hasPadding()) {
+      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
+    }
+
+    assert(!cir::MissingFeatures::opTBAA());
+    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
+  }
+
+private:
+  bool requiresMemSetZero(mlir::Type ty) const;
+};
+} // namespace
+
+/// Does a store of the given IR type modify the full expected width?
+static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
+                           uint64_t expectedSize) {
+  return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
+}
+
+/// Does the atomic type require memsetting to zero before initialization?
+///
+/// The IR type is provided as a way of making certain queries faster.
+bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
+  // If the atomic type has size padding, we definitely need a memset.
+  if (hasPadding())
+    return true;
+
+  // Otherwise, do some simple heuristics to try to avoid it:
+  switch (getEvaluationKind()) {
+  // For scalars and complexes, check whether the store size of the
+  // type uses the full size.
+  case cir::TEK_Scalar:
+    return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
+  case cir::TEK_Complex:
+    cgf.cgm.errorNYI(loc, "AtomicInfo::requiresMemSetZero: complex type");
+    return false;
+
+  // Padding in structs has an undefined bit pattern.  User beware.
+  case cir::TEK_Aggregate:
+    return false;
+  }
+  llvm_unreachable("bad evaluation kind");
+}
+
+bool AtomicInfo::emitMemSetZeroIfNecessary() const {
+  assert(lvalue.isSimple());
+  Address addr = lvalue.getAddress();
+  if (!requiresMemSetZero(addr.getElementType()))
+    return false;
+
+  cgf.cgm.errorNYI(loc,
+                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
+  return false;
+}
+
+/// Copy an r-value into memory as part of storing to an atomic type.
+/// This needs to create a bit-pattern suitable for atomic operations.
+void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
+  assert(lvalue.isSimple());
+
+  // If we have an r-value, the rvalue should be of the atomic type,
+  // which means that the caller is responsible for having zeroed
+  // any padding.  Just do an aggregate copy of that type.
+  if (rvalue.isAggregate()) {
+    cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
+    return;
+  }
+
+  // Okay, otherwise we're copying stuff.
+
+  // Zero out the buffer if necessary.
+  emitMemSetZeroIfNecessary();
+
+  // Drill past the padding if present.
+  LValue tempLValue = projectValue();
+
+  // Okay, store the rvalue in.
+  if (rvalue.isScalar()) {
+    cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
+  } else {
+    cgf.cgm.errorNYI("copying complex into atomic lvalue");
+  }
+}
+
+RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
+  QualType atomicTy = e->getPtr()->getType()->getPointeeType();
+  QualType memTy = atomicTy;
+  if (const auto *ty = atomicTy->getAs<AtomicType>())
+    memTy = ty->getValueType();
+
+  Address ptr = emitPointerWithAlignment(e->getPtr());
+
+  assert(!cir::MissingFeatures::openCL());
+  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
+    LValue lvalue = makeAddrLValue(ptr, atomicTy);
+    emitAtomicInit(e->getVal1(), lvalue);
+    return RValue::get(nullptr);
+  }
+
+  assert(!cir::MissingFeatures::atomicExpr());
+  cgm.errorNYI(e->getSourceRange(), "atomic expr is NYI");
+  return RValue::get(nullptr);
+}
+
+void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
+  AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));
+
+  switch (atomics.getEvaluationKind()) {
+  case cir::TEK_Scalar: {
+    mlir::Value value = emitScalarExpr(init);
+    atomics.emitCopyIntoMemory(RValue::get(value));
+    return;
+  }
+
+  case cir::TEK_Complex:
+    cgm.errorNYI(init->getSourceRange(), "emitAtomicInit: complex type");
+    return;
+
+  case cir::TEK_Aggregate:
+    cgm.errorNYI(init->getSourceRange(), "emitAtomicInit: aggregate type");
+    return;
+  }
+
+  llvm_unreachable("bad evaluation kind");
+}
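
For orientation: the only atomic operation this new file lowers end-to-end so far is __c11_atomic_init; every other AtomicExpr falls through to the "atomic expr is NYI" diagnostic. A plain-C sketch mirroring the commit's own test, with comments that reflect a reading of the code above rather than anything stated in the commit message:

    void init_example(void) {
      _Atomic(int) x;
      /* AtomicExpr with op AO__c11_atomic_init:
         emitAtomicExpr -> emitAtomicInit -> AtomicInfo::emitCopyIntoMemory,
         which for a scalar value type with no padding reduces to an
         ordinary aligned store of 42 into the slot for 'x'. */
      __c11_atomic_init(&x, 42);
    }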

diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 5d7dda63cc7d2..cc1fd81433b53 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -184,8 +184,11 @@ Address CIRGenFunction::emitPointerWithAlignment(const Expr *expr,
   if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
     // TODO(cir): maybe we should use cir.unary for pointers here instead.
     if (uo->getOpcode() == UO_AddrOf) {
-      cgm.errorNYI(expr->getSourceRange(), "emitPointerWithAlignment: unary &");
-      return Address::invalid();
+      LValue lv = emitLValue(uo->getSubExpr());
+      if (baseInfo)
+        *baseInfo = lv.getBaseInfo();
+      assert(!cir::MissingFeatures::opTBAA());
+      return lv.getAddress();
     }
   }
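
The emitPointerWithAlignment change above matters for atomics because the pointer operand of the C11 atomic builtins is almost always a unary '&' expression; it can now be emitted with the alignment of the underlying lvalue instead of hitting the old NYI. A hypothetical C example (not taken from the commit's tests, and assuming atomic struct members are otherwise handled):

    struct S { char c; _Atomic(int) f; };

    void init_member(struct S *s) {
      /* '&s->f' is UO_AddrOf over a member lvalue; the new path emits that
         lvalue and reuses its address and alignment for the atomic init. */
      __c11_atomic_init(&s->f, 0);
    }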
 

diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index d4c7d306cb110..8649bab91ce8e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1060,6 +1060,10 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
 
     return maybePromoteBoolResult(resOp.getResult(), resTy);
   }
+
+  mlir::Value VisitAtomicExpr(AtomicExpr *e) {
+    return cgf.emitAtomicExpr(e).getValue();
+  }
 };
 
 LValue ScalarExprEmitter::emitCompoundAssignLValue(

diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index d905eba0b2691..2333ec3209c3b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -926,6 +926,9 @@ class CIRGenFunction : public CIRGenTypeCache {
 
   Address emitArrayToPointerDecay(const Expr *array);
 
+  RValue emitAtomicExpr(AtomicExpr *e);
+  void emitAtomicInit(Expr *init, LValue dest);
+
   AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d,
                                     mlir::OpBuilder::InsertPoint ip = {});
 
@@ -1234,7 +1237,7 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// reasonable to just ignore the returned alignment when it isn't from an
   /// explicit source.
   Address emitPointerWithAlignment(const clang::Expr *expr,
-                                   LValueBaseInfo *baseInfo);
+                                   LValueBaseInfo *baseInfo = nullptr);
 
   /// Emits a reference binding to the passed in expression.
   RValue emitReferenceBindingToExpr(const Expr *e);

diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
index 0084519154e2a..41433d3f16103 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenTypes.cpp
@@ -493,6 +493,20 @@ mlir::Type CIRGenTypes::convertType(QualType type) {
     break;
   }
 
+  case Type::Atomic: {
+    QualType valueType = cast<AtomicType>(ty)->getValueType();
+    resultType = convertTypeForMem(valueType);
+
+    // Pad out to the inflated size if necessary.
+    uint64_t valueSize = astContext.getTypeSize(valueType);
+    uint64_t atomicSize = astContext.getTypeSize(ty);
+    if (valueSize != atomicSize) {
+      cgm.errorNYI("convertType: atomic type value size != atomic size");
+    }
+
+    break;
+  }
+
   default:
     cgm.errorNYI(SourceLocation(), "processing of type",
                  type->getTypeClassName());
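
Note that this convertType case only handles atomics whose value type already fills the inflated atomic width; when the frontend pads the atomic object out (typically to the next power of two so it can be lock-free), the size mismatch still reaches errorNYI. A hypothetical C example expected to take that branch on common targets (not covered by the commit's tests):

    struct three { char c[3]; };   /* sizeof == 3, i.e. 24 bits            */
    _Atomic(struct three) g;       /* usually inflated to 4 bytes, so the
                                      "value size != atomic size" errorNYI
                                      above would fire when converting it. */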

diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index 0832c4141a10f..661cecf8416b6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -190,6 +190,7 @@ class LValue {
   bool isSimple() const { return lvType == Simple; }
   bool isVectorElt() const { return lvType == VectorElt; }
   bool isBitField() const { return lvType == BitField; }
+  bool isGlobalReg() const { return lvType == GlobalReg; }
   bool isVolatile() const { return quals.hasVolatile(); }
 
   bool isVolatileQualified() const { return quals.hasVolatile(); }

diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
index 0a39ebf7d4c45..12cea944eb2f3 100644
--- a/clang/lib/CIR/CodeGen/CMakeLists.txt
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -8,6 +8,7 @@ get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
 
 add_clang_library(clangCIR
   CIRGenerator.cpp
+  CIRGenAtomic.cpp
   CIRGenBuilder.cpp
   CIRGenCall.cpp
   CIRGenClass.cpp

diff --git a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp
index d835c4076224a..8b806b406a536 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDataLayout.cpp
@@ -1,4 +1,6 @@
 #include "clang/CIR/Dialect/IR/CIRDataLayout.h"
+#include "clang/CIR/Dialect/IR/CIRTypes.h"
+#include "clang/CIR/MissingFeatures.h"
 
 using namespace cir;
 
@@ -20,3 +22,21 @@ void CIRDataLayout::reset(mlir::DataLayoutSpecInterface spec) {
         bigEndian = str == mlir::DLTIDialect::kDataLayoutEndiannessBig;
   }
 }
+
+// The implementation of this method is provided inline as it is particularly
+// well suited to constant folding when called on a specific Type subclass.
+llvm::TypeSize CIRDataLayout::getTypeSizeInBits(mlir::Type ty) const {
+  assert(!cir::MissingFeatures::dataLayoutTypeIsSized());
+
+  if (auto recordTy = llvm::dyn_cast<cir::RecordType>(ty)) {
+    // FIXME(cir): CIR record's data layout implementation doesn't do a good job
+    // of handling unions particularities. We should have a separate union type.
+    return recordTy.getTypeSizeInBits(layout, {});
+  }
+
+  // FIXME(cir): This does not account for different address spaces, and relies
+  // on CIR's data layout to give the proper ABI-specific type width.
+  assert(!cir::MissingFeatures::addressSpace());
+
+  return llvm::TypeSize::getFixed(layout.getTypeSizeInBits(ty));
+}

diff --git a/clang/test/CIR/CodeGen/atomic.c b/clang/test/CIR/CodeGen/atomic.c
new file mode 100644
index 0000000000000..8db4ae43d7389
--- /dev/null
+++ b/clang/test/CIR/CodeGen/atomic.c
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+void f1(void) {
+  _Atomic(int) x = 42;
+}
+
+// CIR-LABEL: @f1
+// CIR:         %[[SLOT:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64}
+// CIR-NEXT:    %[[INIT:.+]] = cir.const #cir.int<42> : !s32i
+// CIR-NEXT:    cir.store align(4) %[[INIT]], %[[SLOT]] : !s32i, !cir.ptr<!s32i>
+// CIR:       }
+
+// LLVM-LABEL: @f1
+// LLVM:         %[[SLOT:.+]] = alloca i32, i64 1, align 4
+// LLVM-NEXT:    store i32 42, ptr %[[SLOT]], align 4
+// LLVM:       }
+
+// OGCG-LABEL: @f1
+// OGCG:         %[[SLOT:.+]] = alloca i32, align 4
+// OGCG-NEXT:    store i32 42, ptr %[[SLOT]], align 4
+// OGCG:       }
+
+void f2(void) {
+  _Atomic(int) x;
+  __c11_atomic_init(&x, 42);
+}
+
+// CIR-LABEL: @f2
+// CIR:         %[[SLOT:.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["x"] {alignment = 4 : i64}
+// CIR-NEXT:    %[[INIT:.+]] = cir.const #cir.int<42> : !s32i
+// CIR-NEXT:    cir.store align(4) %[[INIT]], %[[SLOT]] : !s32i, !cir.ptr<!s32i>
+// CIR:       }
+
+// LLVM-LABEL: @f2
+// LLVM:         %[[SLOT:.+]] = alloca i32, i64 1, align 4
+// LLVM-NEXT:    store i32 42, ptr %[[SLOT]], align 4
+// LLVM:       }
+
+// OGCG-LABEL: @f2
+// OGCG:         %[[SLOT:.+]] = alloca i32, align 4
+// OGCG-NEXT:    store i32 42, ptr %[[SLOT]], align 4
+// OGCG:       }
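
The new test only exercises initialization; any other atomic expression is still expected to be rejected by emitAtomicExpr. A hypothetical follow-on case, written in the style of the test above but deliberately not part of it:

    int f3(_Atomic(int) *p) {
      /* Any AtomicExpr other than AO__c11_atomic_init currently reports
         "atomic expr is NYI". */
      return __c11_atomic_load(p, __ATOMIC_SEQ_CST);
    }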


More information about the cfe-commits mailing list