[clang] [Clang] Re-write codegen for atomic_test_and_set and atomic_clear (PR #120449)
Oliver Stannard via cfe-commits
cfe-commits at lists.llvm.org
Wed Dec 18 09:11:31 PST 2024
https://github.com/ostannard updated https://github.com/llvm/llvm-project/pull/120449
>From 28174b0b54d36b070200d630bdeae64232264841 Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Wed, 18 Dec 2024 15:46:02 +0000
Subject: [PATCH 1/4] Add test for current behaviour
---
clang/test/CodeGen/atomic-test-and-set.c | 199 +++++++++++++++++++++++
1 file changed, 199 insertions(+)
create mode 100644 clang/test/CodeGen/atomic-test-and-set.c
diff --git a/clang/test/CodeGen/atomic-test-and-set.c b/clang/test/CodeGen/atomic-test-and-set.c
new file mode 100644
index 00000000000000..a736849f16e3ac
--- /dev/null
+++ b/clang/test/CodeGen/atomic-test-and-set.c
@@ -0,0 +1,199 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=aarch64-none-elf | FileCheck %s
+// REQUIRES: aarch64-registered-target
+
+#include <stdatomic.h>
+
+// CHECK-LABEL: define dso_local void @clear_relaxed(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] monotonic, align 1
+// CHECK-NEXT: ret void
+//
+void clear_relaxed(char *ptr) {
+ __atomic_clear(ptr, memory_order_relaxed);
+}
+
+// CHECK-LABEL: define dso_local void @clear_seq_cst(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] seq_cst, align 1
+// CHECK-NEXT: ret void
+//
+void clear_seq_cst(char *ptr) {
+ __atomic_clear(ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: define dso_local void @clear_release(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] release, align 1
+// CHECK-NEXT: ret void
+//
+void clear_release(char *ptr) {
+ __atomic_clear(ptr, memory_order_release);
+}
+
+// CHECK-LABEL: define dso_local void @clear_dynamic(
+// CHECK-SAME: ptr noundef [[PTR:%.*]], i32 noundef [[ORDER:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ORDER_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store i32 [[ORDER]], ptr [[ORDER_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ORDER_ADDR]], align 4
+// CHECK-NEXT: switch i32 [[TMP1]], label %[[MONOTONIC:.*]] [
+// CHECK-NEXT: i32 0, label %[[MONOTONIC]]
+// CHECK-NEXT: i32 3, label %[[RELEASE:.*]]
+// CHECK-NEXT: i32 5, label %[[SEQCST:.*]]
+// CHECK-NEXT: ]
+// CHECK: [[ATOMIC_CONTINUE:.*]]:
+// CHECK-NEXT: ret void
+// CHECK: [[MONOTONIC]]:
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] monotonic, align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[RELEASE]]:
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] release, align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[SEQCST]]:
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] seq_cst, align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+//
+void clear_dynamic(char *ptr, int order) {
+ __atomic_clear(ptr, order);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_relaxed(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: ret void
+//
+void test_and_set_relaxed(char *ptr) {
+ __atomic_test_and_set(ptr, memory_order_relaxed);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_consume(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: ret void
+//
+void test_and_set_consume(char *ptr) {
+ __atomic_test_and_set(ptr, memory_order_consume);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_acquire(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: ret void
+//
+void test_and_set_acquire(char *ptr) {
+ __atomic_test_and_set(ptr, memory_order_acquire);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_release(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: ret void
+//
+void test_and_set_release(char *ptr) {
+ __atomic_test_and_set(ptr, memory_order_release);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_acq_rel(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: ret void
+//
+void test_and_set_acq_rel(char *ptr) {
+ __atomic_test_and_set(ptr, memory_order_acq_rel);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_seq_cst(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: ret void
+//
+void test_and_set_seq_cst(char *ptr) {
+ __atomic_test_and_set(ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_dynamic(
+// CHECK-SAME: ptr noundef [[PTR:%.*]], i32 noundef [[ORDER:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ORDER_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store i32 [[ORDER]], ptr [[ORDER_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ORDER_ADDR]], align 4
+// CHECK-NEXT: switch i32 [[TMP1]], label %[[MONOTONIC:.*]] [
+// CHECK-NEXT: i32 0, label %[[MONOTONIC]]
+// CHECK-NEXT: i32 1, label %[[ACQUIRE:.*]]
+// CHECK-NEXT: i32 2, label %[[ACQUIRE]]
+// CHECK-NEXT: i32 3, label %[[RELEASE:.*]]
+// CHECK-NEXT: i32 4, label %[[ACQREL:.*]]
+// CHECK-NEXT: i32 5, label %[[SEQCST:.*]]
+// CHECK-NEXT: ]
+// CHECK: [[ATOMIC_CONTINUE:.*]]:
+// CHECK-NEXT: [[WAS_SET:%.*]] = phi i8 [ [[TMP2:%.*]], %[[MONOTONIC]] ], [ [[TMP3:%.*]], %[[ACQUIRE]] ], [ [[TMP4:%.*]], %[[RELEASE]] ], [ [[TMP5:%.*]], %[[ACQREL]] ], [ [[TMP6:%.*]], %[[SEQCST]] ]
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[WAS_SET]], 0
+// CHECK-NEXT: ret void
+// CHECK: [[MONOTONIC]]:
+// CHECK-NEXT: [[TMP2]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[ACQUIRE]]:
+// CHECK-NEXT: [[TMP3]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[RELEASE]]:
+// CHECK-NEXT: [[TMP4]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[ACQREL]]:
+// CHECK-NEXT: [[TMP5]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[SEQCST]]:
+// CHECK-NEXT: [[TMP6]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+//
+void test_and_set_dynamic(char *ptr, int order) {
+ __atomic_test_and_set(ptr, order);
+}
>From 260eb97e1b80e5c6c0629a1e6bb48b8726305833 Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Wed, 18 Dec 2024 15:29:03 +0000
Subject: [PATCH 2/4] [Clang] Re-write codegen for atomic_test_and_set and
atomic_clear
Re-write the sema and codegen for the atomic_test_and_set and
atomic_clear builtin functions to go via AtomicExpr, like the other
atomic builtins do. This simplifies the code, because AtomicExpr already
handles generating the code to dynamically select the memory ordering,
which was previously duplicated for these builtins. It also fixes two
crash bugs: one when passing an integer to the pointer argument, and one
when passing an array.
Fixes #111293.
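For reference, a minimal sketch of the two crash inputs (not part of the
patch itself; it mirrors the new codegen and sema tests added below):

  #include <stdatomic.h>

  void repro(void) {
    volatile int x[10];
    // Array argument: previously crashed, now decays to a pointer and
    // emits a volatile atomicrmw xchg.
    __atomic_test_and_set(x, memory_order_seq_cst);
    // Integer argument: previously crashed, now rejected with an error
    // ("address argument to atomic builtin must be a pointer").
    // __atomic_clear(0x8000, memory_order_seq_cst);
  }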
---
clang/include/clang/Basic/Builtins.td | 8 +-
clang/lib/AST/Expr.cpp | 2 +
clang/lib/CodeGen/CGAtomic.cpp | 25 +++-
clang/lib/CodeGen/CGBuiltin.cpp | 141 -----------------------
clang/lib/Sema/SemaChecking.cpp | 32 ++++-
clang/test/CodeGen/atomic-test-and-set.c | 81 ++++++++++---
clang/test/Sema/atomic-ops.c | 5 +-
7 files changed, 125 insertions(+), 169 deletions(-)
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index d64a66fc9d9cf7..b11e23bb2d6ad3 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -1977,15 +1977,15 @@ def AtomicNandFetch : AtomicBuiltin {
let Prototype = "void(...)";
}
-def AtomicTestAndSet : Builtin {
+def AtomicTestAndSet : AtomicBuiltin {
let Spellings = ["__atomic_test_and_set"];
- let Attributes = [NoThrow];
+ let Attributes = [NoThrow, CustomTypeChecking];
let Prototype = "bool(void volatile*, int)";
}
-def AtomicClear : Builtin {
+def AtomicClear : AtomicBuiltin {
let Spellings = ["__atomic_clear"];
- let Attributes = [NoThrow];
+ let Attributes = [NoThrow, CustomTypeChecking];
let Prototype = "void(void volatile*, int)";
}
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index 8c8ccdb61dc01c..7e6cb53064ff2b 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -5070,6 +5070,8 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__opencl_atomic_init:
case AO__c11_atomic_load:
case AO__atomic_load_n:
+ case AO__atomic_test_and_set:
+ case AO__atomic_clear:
return 2;
case AO__scoped_atomic_load_n:
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index f6cb2ad421e906..3adb2a7ad207f0 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -723,6 +723,24 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__scoped_atomic_fetch_nand:
Op = llvm::AtomicRMWInst::Nand;
break;
+
+ case AtomicExpr::AO__atomic_test_and_set: {
+ llvm::AtomicRMWInst *RMWI =
+ CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr,
+ CGF.Builder.getInt8(1), Order, Scope, E);
+ RMWI->setVolatile(E->isVolatile());
+ llvm::Value *Result = CGF.Builder.CreateIsNotNull(RMWI, "tobool");
+ CGF.Builder.CreateStore(Result, Dest);
+ return;
+ }
+
+ case AtomicExpr::AO__atomic_clear: {
+ llvm::StoreInst *Store =
+ CGF.Builder.CreateStore(CGF.Builder.getInt8(0), Ptr);
+ Store->setAtomic(Order, Scope);
+ Store->setVolatile(E->isVolatile());
+ return;
+ }
}
llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
@@ -878,6 +896,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
+ case AtomicExpr::AO__atomic_test_and_set:
+ case AtomicExpr::AO__atomic_clear:
break;
case AtomicExpr::AO__atomic_load:
@@ -1200,6 +1220,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__scoped_atomic_fetch_max:
case AtomicExpr::AO__scoped_atomic_max_fetch:
+ case AtomicExpr::AO__atomic_test_and_set:
+ case AtomicExpr::AO__atomic_clear:
llvm_unreachable("Integral atomic operations always become atomicrmw!");
}
@@ -1239,7 +1261,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
E->getOp() == AtomicExpr::AO__atomic_store ||
E->getOp() == AtomicExpr::AO__atomic_store_n ||
E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
- E->getOp() == AtomicExpr::AO__scoped_atomic_store_n;
+ E->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
+ E->getOp() == AtomicExpr::AO__atomic_clear;
bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
E->getOp() == AtomicExpr::AO__hip_atomic_load ||
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 4d4b7428abd505..0ea2ee4c264aef 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -5099,147 +5099,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
ReturnValueSlot(), Args);
}
- case Builtin::BI__atomic_test_and_set: {
- // Look at the argument type to determine whether this is a volatile
- // operation. The parameter type is always volatile.
- QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
- bool Volatile =
- PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
-
- Address Ptr =
- EmitPointerWithAlignment(E->getArg(0)).withElementType(Int8Ty);
-
- Value *NewVal = Builder.getInt8(1);
- Value *Order = EmitScalarExpr(E->getArg(1));
- if (isa<llvm::ConstantInt>(Order)) {
- int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
- AtomicRMWInst *Result = nullptr;
- switch (ord) {
- case 0: // memory_order_relaxed
- default: // invalid order
- Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
- llvm::AtomicOrdering::Monotonic);
- break;
- case 1: // memory_order_consume
- case 2: // memory_order_acquire
- Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
- llvm::AtomicOrdering::Acquire);
- break;
- case 3: // memory_order_release
- Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
- llvm::AtomicOrdering::Release);
- break;
- case 4: // memory_order_acq_rel
-
- Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
- llvm::AtomicOrdering::AcquireRelease);
- break;
- case 5: // memory_order_seq_cst
- Result = Builder.CreateAtomicRMW(
- llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
- llvm::AtomicOrdering::SequentiallyConsistent);
- break;
- }
- Result->setVolatile(Volatile);
- return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
- }
-
- llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
-
- llvm::BasicBlock *BBs[5] = {
- createBasicBlock("monotonic", CurFn),
- createBasicBlock("acquire", CurFn),
- createBasicBlock("release", CurFn),
- createBasicBlock("acqrel", CurFn),
- createBasicBlock("seqcst", CurFn)
- };
- llvm::AtomicOrdering Orders[5] = {
- llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
- llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
- llvm::AtomicOrdering::SequentiallyConsistent};
-
- Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
- llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
-
- Builder.SetInsertPoint(ContBB);
- PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
-
- for (unsigned i = 0; i < 5; ++i) {
- Builder.SetInsertPoint(BBs[i]);
- AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
- Ptr, NewVal, Orders[i]);
- RMW->setVolatile(Volatile);
- Result->addIncoming(RMW, BBs[i]);
- Builder.CreateBr(ContBB);
- }
-
- SI->addCase(Builder.getInt32(0), BBs[0]);
- SI->addCase(Builder.getInt32(1), BBs[1]);
- SI->addCase(Builder.getInt32(2), BBs[1]);
- SI->addCase(Builder.getInt32(3), BBs[2]);
- SI->addCase(Builder.getInt32(4), BBs[3]);
- SI->addCase(Builder.getInt32(5), BBs[4]);
-
- Builder.SetInsertPoint(ContBB);
- return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
- }
-
- case Builtin::BI__atomic_clear: {
- QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
- bool Volatile =
- PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
-
- Address Ptr = EmitPointerWithAlignment(E->getArg(0));
- Ptr = Ptr.withElementType(Int8Ty);
- Value *NewVal = Builder.getInt8(0);
- Value *Order = EmitScalarExpr(E->getArg(1));
- if (isa<llvm::ConstantInt>(Order)) {
- int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
- StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
- switch (ord) {
- case 0: // memory_order_relaxed
- default: // invalid order
- Store->setOrdering(llvm::AtomicOrdering::Monotonic);
- break;
- case 3: // memory_order_release
- Store->setOrdering(llvm::AtomicOrdering::Release);
- break;
- case 5: // memory_order_seq_cst
- Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
- break;
- }
- return RValue::get(nullptr);
- }
-
- llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
-
- llvm::BasicBlock *BBs[3] = {
- createBasicBlock("monotonic", CurFn),
- createBasicBlock("release", CurFn),
- createBasicBlock("seqcst", CurFn)
- };
- llvm::AtomicOrdering Orders[3] = {
- llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
- llvm::AtomicOrdering::SequentiallyConsistent};
-
- Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
- llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
-
- for (unsigned i = 0; i < 3; ++i) {
- Builder.SetInsertPoint(BBs[i]);
- StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
- Store->setOrdering(Orders[i]);
- Builder.CreateBr(ContBB);
- }
-
- SI->addCase(Builder.getInt32(0), BBs[0]);
- SI->addCase(Builder.getInt32(3), BBs[1]);
- SI->addCase(Builder.getInt32(5), BBs[2]);
-
- Builder.SetInsertPoint(ContBB);
- return RValue::get(nullptr);
- }
-
case Builtin::BI__atomic_thread_fence:
case Builtin::BI__atomic_signal_fence:
case Builtin::BI__c11_atomic_thread_fence:
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index a248a6b53b0d06..e33289e4d0801b 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -3683,12 +3683,18 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
C11CmpXchg,
// bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
- GNUCmpXchg
+ GNUCmpXchg,
+
+ // bool __atomic_test_and_set(A *, int)
+ TestAndSet,
+
+ // void __atomic_clear(A *, int)
+ Clear,
} Form = Init;
- const unsigned NumForm = GNUCmpXchg + 1;
- const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
- const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
+ const unsigned NumForm = Clear + 1;
+ const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6, 2, 2 };
+ const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3, 0, 0 };
// where:
// C is an appropriate type,
// A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
@@ -3849,6 +3855,14 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
Form = GNUCmpXchg;
break;
+
+ case AtomicExpr::AO__atomic_test_and_set:
+ Form = TestAndSet;
+ break;
+
+ case AtomicExpr::AO__atomic_clear:
+ Form = Clear;
+ break;
}
unsigned AdjustedNumArgs = NumArgs[Form];
@@ -3995,9 +4009,9 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
ValType.removeLocalConst();
QualType ResultType = ValType;
if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
- Form == Init)
+ Form == Init || Form == Clear)
ResultType = Context.VoidTy;
- else if (Form == C11CmpXchg || Form == GNUCmpXchg)
+ else if (Form == C11CmpXchg || Form == GNUCmpXchg || Form == TestAndSet)
ResultType = Context.BoolTy;
// The type of a parameter passed 'by value'. In the GNU atomics, such
@@ -4042,6 +4056,10 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
APIOrderedArgs.push_back(Args[1]); // Order
APIOrderedArgs.push_back(Args[3]); // OrderFail
break;
+ case TestAndSet:
+ case Clear:
+ APIOrderedArgs.push_back(Args[1]); // Order
+ break;
}
} else
APIOrderedArgs.append(Args.begin(), Args.end());
@@ -4127,6 +4145,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SubExprs.push_back(APIOrderedArgs[1]); // Val1
break;
case Load:
+ case TestAndSet:
+ case Clear:
SubExprs.push_back(APIOrderedArgs[1]); // Order
break;
case LoadCopy:
diff --git a/clang/test/CodeGen/atomic-test-and-set.c b/clang/test/CodeGen/atomic-test-and-set.c
index a736849f16e3ac..bb05623f897551 100644
--- a/clang/test/CodeGen/atomic-test-and-set.c
+++ b/clang/test/CodeGen/atomic-test-and-set.c
@@ -53,21 +53,20 @@ void clear_release(char *ptr) {
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ORDER_ADDR]], align 4
// CHECK-NEXT: switch i32 [[TMP1]], label %[[MONOTONIC:.*]] [
-// CHECK-NEXT: i32 0, label %[[MONOTONIC]]
// CHECK-NEXT: i32 3, label %[[RELEASE:.*]]
// CHECK-NEXT: i32 5, label %[[SEQCST:.*]]
// CHECK-NEXT: ]
-// CHECK: [[ATOMIC_CONTINUE:.*]]:
-// CHECK-NEXT: ret void
// CHECK: [[MONOTONIC]]:
// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] monotonic, align 1
-// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE:.*]]
// CHECK: [[RELEASE]]:
// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] release, align 1
// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
// CHECK: [[SEQCST]]:
// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] seq_cst, align 1
// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[ATOMIC_CONTINUE]]:
+// CHECK-NEXT: ret void
//
void clear_dynamic(char *ptr, int order) {
__atomic_clear(ptr, order);
@@ -77,10 +76,14 @@ void clear_dynamic(char *ptr, int order) {
// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1
// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
// CHECK-NEXT: ret void
//
void test_and_set_relaxed(char *ptr) {
@@ -91,10 +94,14 @@ void test_and_set_relaxed(char *ptr) {
// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
// CHECK-NEXT: ret void
//
void test_and_set_consume(char *ptr) {
@@ -105,10 +112,14 @@ void test_and_set_consume(char *ptr) {
// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
// CHECK-NEXT: ret void
//
void test_and_set_acquire(char *ptr) {
@@ -119,10 +130,14 @@ void test_and_set_acquire(char *ptr) {
// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1
// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
// CHECK-NEXT: ret void
//
void test_and_set_release(char *ptr) {
@@ -133,10 +148,14 @@ void test_and_set_release(char *ptr) {
// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1
// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
// CHECK-NEXT: ret void
//
void test_and_set_acq_rel(char *ptr) {
@@ -147,10 +166,14 @@ void test_and_set_acq_rel(char *ptr) {
// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1
// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
// CHECK-NEXT: ret void
//
void test_and_set_seq_cst(char *ptr) {
@@ -162,38 +185,66 @@ void test_and_set_seq_cst(char *ptr) {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[ORDER_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store i32 [[ORDER]], ptr [[ORDER_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ORDER_ADDR]], align 4
// CHECK-NEXT: switch i32 [[TMP1]], label %[[MONOTONIC:.*]] [
-// CHECK-NEXT: i32 0, label %[[MONOTONIC]]
// CHECK-NEXT: i32 1, label %[[ACQUIRE:.*]]
// CHECK-NEXT: i32 2, label %[[ACQUIRE]]
// CHECK-NEXT: i32 3, label %[[RELEASE:.*]]
// CHECK-NEXT: i32 4, label %[[ACQREL:.*]]
// CHECK-NEXT: i32 5, label %[[SEQCST:.*]]
// CHECK-NEXT: ]
-// CHECK: [[ATOMIC_CONTINUE:.*]]:
-// CHECK-NEXT: [[WAS_SET:%.*]] = phi i8 [ [[TMP2:%.*]], %[[MONOTONIC]] ], [ [[TMP3:%.*]], %[[ACQUIRE]] ], [ [[TMP4:%.*]], %[[RELEASE]] ], [ [[TMP5:%.*]], %[[ACQREL]] ], [ [[TMP6:%.*]], %[[SEQCST]] ]
-// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[WAS_SET]], 0
-// CHECK-NEXT: ret void
// CHECK: [[MONOTONIC]]:
-// CHECK-NEXT: [[TMP2]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1
-// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP2]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE:.*]]
// CHECK: [[ACQUIRE]]:
-// CHECK-NEXT: [[TMP3]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
+// CHECK-NEXT: [[TOBOOL1:%.*]] = icmp ne i8 [[TMP3]], 0
+// CHECK-NEXT: store i1 [[TOBOOL1]], ptr [[ATOMIC_TEMP]], align 1
// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
// CHECK: [[RELEASE]]:
-// CHECK-NEXT: [[TMP4]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1
+// CHECK-NEXT: [[TMP4:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1
+// CHECK-NEXT: [[TOBOOL2:%.*]] = icmp ne i8 [[TMP4]], 0
+// CHECK-NEXT: store i1 [[TOBOOL2]], ptr [[ATOMIC_TEMP]], align 1
// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
// CHECK: [[ACQREL]]:
-// CHECK-NEXT: [[TMP5]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1
+// CHECK-NEXT: [[TMP5:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1
+// CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i8 [[TMP5]], 0
+// CHECK-NEXT: store i1 [[TOBOOL3]], ptr [[ATOMIC_TEMP]], align 1
// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
// CHECK: [[SEQCST]]:
-// CHECK-NEXT: [[TMP6]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1
+// CHECK-NEXT: [[TMP6:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1
+// CHECK-NEXT: [[TOBOOL4:%.*]] = icmp ne i8 [[TMP6]], 0
+// CHECK-NEXT: store i1 [[TOBOOL4]], ptr [[ATOMIC_TEMP]], align 1
// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[ATOMIC_CONTINUE]]:
+// CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
+// CHECK-NEXT: ret void
//
void test_and_set_dynamic(char *ptr, int order) {
__atomic_test_and_set(ptr, order);
}
+
+// CHECK-LABEL: define dso_local void @test_and_set_array(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[X:%.*]] = alloca [10 x i32], align 4
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x i32], ptr [[X]], i64 0, i64 0
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw volatile xchg ptr [[ARRAYDECAY]], i8 1 seq_cst, align 4
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP0]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 4
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP1]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_array() {
+ volatile int x[10];
+ __atomic_test_and_set(x, memory_order_seq_cst);
+}
diff --git a/clang/test/Sema/atomic-ops.c b/clang/test/Sema/atomic-ops.c
index 2405f804d0da54..713cdd4f0635d6 100644
--- a/clang/test/Sema/atomic-ops.c
+++ b/clang/test/Sema/atomic-ops.c
@@ -284,11 +284,12 @@ void f(_Atomic(int) *i, const _Atomic(int) *ci,
const volatile int flag_k = 0;
volatile int flag = 0;
- (void)(int)__atomic_test_and_set(&flag_k, memory_order_seq_cst); // expected-warning {{passing 'const volatile int *' to parameter of type 'volatile void *'}}
+ (void)(int)__atomic_test_and_set(&flag_k, memory_order_seq_cst); // expected-error {{address argument to atomic operation must be a pointer to non-const type ('const volatile int *' invalid)}}
(void)(int)__atomic_test_and_set(&flag, memory_order_seq_cst);
- __atomic_clear(&flag_k, memory_order_seq_cst); // expected-warning {{passing 'const volatile int *' to parameter of type 'volatile void *'}}
+ __atomic_clear(&flag_k, memory_order_seq_cst); // expected-error {{address argument to atomic operation must be a pointer to non-const type ('const volatile int *' invalid)}}
__atomic_clear(&flag, memory_order_seq_cst);
(int)__atomic_clear(&flag, memory_order_seq_cst); // expected-error {{operand of type 'void'}}
+ __atomic_clear(0x8000, memory_order_seq_cst); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
__c11_atomic_init(ci, 0); // expected-error {{address argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
__c11_atomic_store(ci, 0, memory_order_release); // expected-error {{address argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
>From 061b665db29b12f15cf3fee76c8e124d07ff050d Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Wed, 18 Dec 2024 15:51:15 +0000
Subject: [PATCH 3/4] Diagnose invalid order for atomic_clear
The consume, acquire and acq_rel memory orderings are not valid for the
atomic_clear builtin according to
https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html, so we
should diagnose them.
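As a small illustration (not part of the patch itself), the calls below
mirror the new sema tests added in this change; the consume, acquire and
acq_rel calls now get the "memory order argument to atomic operation is
invalid" warning:

  #include <stdatomic.h>

  void clear_orders(volatile int *flag) {
    __atomic_clear(flag, memory_order_relaxed);  // still accepted
    __atomic_clear(flag, memory_order_consume);  // now warns: invalid order for a store-like builtin
    __atomic_clear(flag, memory_order_acquire);  // now warns
    __atomic_clear(flag, memory_order_acq_rel);  // now warns
    __atomic_clear(flag, memory_order_release);  // still accepted
  }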
---
clang/lib/Sema/SemaChecking.cpp | 1 +
clang/test/Sema/atomic-ops.c | 3 +++
2 files changed, 4 insertions(+)
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index e33289e4d0801b..8597b346b297c1 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -3631,6 +3631,7 @@ static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
case AtomicExpr::AO__atomic_store_n:
case AtomicExpr::AO__scoped_atomic_store:
case AtomicExpr::AO__scoped_atomic_store_n:
+ case AtomicExpr::AO__atomic_clear:
return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
diff --git a/clang/test/Sema/atomic-ops.c b/clang/test/Sema/atomic-ops.c
index 713cdd4f0635d6..c3837cf865df8f 100644
--- a/clang/test/Sema/atomic-ops.c
+++ b/clang/test/Sema/atomic-ops.c
@@ -290,6 +290,9 @@ void f(_Atomic(int) *i, const _Atomic(int) *ci,
__atomic_clear(&flag, memory_order_seq_cst);
(int)__atomic_clear(&flag, memory_order_seq_cst); // expected-error {{operand of type 'void'}}
__atomic_clear(0x8000, memory_order_seq_cst); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
+ __atomic_clear(&flag, memory_order_consume); // expected-warning {{memory order argument to atomic operation is invalid}}
+ __atomic_clear(&flag, memory_order_acquire); // expected-warning {{memory order argument to atomic operation is invalid}}
+ __atomic_clear(&flag, memory_order_acq_rel); // expected-warning {{memory order argument to atomic operation is invalid}}
__c11_atomic_init(ci, 0); // expected-error {{address argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
__c11_atomic_store(ci, 0, memory_order_release); // expected-error {{address argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
>From e7b150a2671ed634198ca989bd8bb1cdbfa10277 Mon Sep 17 00:00:00 2001
From: Oliver Stannard <oliver.stannard at arm.com>
Date: Wed, 18 Dec 2024 17:09:32 +0000
Subject: [PATCH 4/4] clang-format
---
clang/lib/Sema/SemaChecking.cpp | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 8597b346b297c1..324ed7f6d90b75 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -3694,8 +3694,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
} Form = Init;
const unsigned NumForm = Clear + 1;
- const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6, 2, 2 };
- const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3, 0, 0 };
+ const unsigned NumArgs[] = {2, 2, 3, 3, 3, 3, 4, 5, 6, 2, 2};
+ const unsigned NumVals[] = {1, 0, 1, 1, 1, 1, 2, 2, 3, 0, 0};
// where:
// C is an appropriate type,
// A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
@@ -4009,8 +4009,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
ValType.removeLocalVolatile();
ValType.removeLocalConst();
QualType ResultType = ValType;
- if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
- Form == Init || Form == Clear)
+ if (Form == Copy || Form == LoadCopy || Form == GNUXchg || Form == Init ||
+ Form == Clear)
ResultType = Context.VoidTy;
else if (Form == C11CmpXchg || Form == GNUCmpXchg || Form == TestAndSet)
ResultType = Context.BoolTy;