[clang] 5cf5876 - Atomics: support min/max orthogonally

Tim Northover via cfe-commits cfe-commits at lists.llvm.org
Thu Nov 21 02:38:07 PST 2019


Author: Tim Northover
Date: 2019-11-21T10:37:56Z
New Revision: 5cf58768cb3ba31ee37facaf23f7a74f78781590

URL: https://github.com/llvm/llvm-project/commit/5cf58768cb3ba31ee37facaf23f7a74f78781590
DIFF: https://github.com/llvm/llvm-project/commit/5cf58768cb3ba31ee37facaf23f7a74f78781590.diff

LOG: Atomics: support min/max orthogonally

We have been gradually growing support for atomic min/max operations
(exposing the long-standing atomicrmw instructions in LLVM IR), but until now
there have been gaps in the expected set of intrinsics. This adds support for
the C11-style intrinsics (i.e. those taking _Atomic types, rather than the
forms individually blessed by the C11 standard), and for the variants that
return the new value instead of the original one.

That way, people won't be misled when they try one form and it doesn't work,
and the front-end is friendlier to people who use _Atomic types, as we
recommend.
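
For illustration only (this example is not part of the commit), a minimal
usage sketch of the new builtins, with semantics matching the tests added
below: the __c11_ form takes an _Atomic pointer and returns the original
value, while the GNU-style *_fetch form takes a plain pointer and returns
the new value.

  #include <stdatomic.h>
  #include <stdio.h>

  int main(void) {
    _Atomic(int) counter = 10;

    /* C11-style form: takes an _Atomic pointer and, like the other
       __c11_atomic_fetch_* builtins, returns the ORIGINAL value. */
    int old = __c11_atomic_fetch_max(&counter, 42, memory_order_seq_cst);

    /* GNU-style "*_fetch" form: takes a plain pointer and returns the NEW
       (post-operation) value instead of the original one. */
    unsigned u = 7;
    unsigned latest = __atomic_min_fetch(&u, 3u, __ATOMIC_SEQ_CST);

    printf("old=%d counter=%d latest=%u\n", old, counter, latest);
    return 0;
  }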

Added: 
    

Modified: 
    clang/docs/LanguageExtensions.rst
    clang/include/clang/Basic/Builtins.def
    clang/include/clang/Basic/DiagnosticSemaKinds.td
    clang/lib/AST/Expr.cpp
    clang/lib/CodeGen/CGAtomic.cpp
    clang/lib/Sema/SemaChecking.cpp
    clang/test/CodeGen/atomic-ops.c
    clang/test/Sema/atomic-ops.c
    clang/test/SemaOpenCL/atomic-ops.cl

Removed: 
    


################################################################################
diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst
index a9fb85fa0d8d..d9a4862dbe80 100644
--- a/clang/docs/LanguageExtensions.rst
+++ b/clang/docs/LanguageExtensions.rst
@@ -2586,6 +2586,8 @@ the corresponding C11 operations, are:
 * ``__c11_atomic_fetch_and``
 * ``__c11_atomic_fetch_or``
 * ``__c11_atomic_fetch_xor``
+* ``__c11_atomic_fetch_max``
+* ``__c11_atomic_fetch_min``
 
 The macros ``__ATOMIC_RELAXED``, ``__ATOMIC_CONSUME``, ``__ATOMIC_ACQUIRE``,
 ``__ATOMIC_RELEASE``, ``__ATOMIC_ACQ_REL``, and ``__ATOMIC_SEQ_CST`` are

diff --git a/clang/include/clang/Basic/Builtins.def b/clang/include/clang/Basic/Builtins.def
index 4ed00a13b004..8a102744700f 100644
--- a/clang/include/clang/Basic/Builtins.def
+++ b/clang/include/clang/Basic/Builtins.def
@@ -718,6 +718,8 @@ ATOMIC_BUILTIN(__c11_atomic_fetch_sub, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_and, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_or, "v.", "t")
 ATOMIC_BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_max, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_min, "v.", "t")
 BUILTIN(__c11_atomic_thread_fence, "vi", "n")
 BUILTIN(__c11_atomic_signal_fence, "vi", "n")
 BUILTIN(__c11_atomic_is_lock_free, "iz", "n")
@@ -742,6 +744,8 @@ ATOMIC_BUILTIN(__atomic_sub_fetch, "v.", "t")
 ATOMIC_BUILTIN(__atomic_and_fetch, "v.", "t")
 ATOMIC_BUILTIN(__atomic_or_fetch, "v.", "t")
 ATOMIC_BUILTIN(__atomic_xor_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_max_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_min_fetch, "v.", "t")
 ATOMIC_BUILTIN(__atomic_nand_fetch, "v.", "t")
 BUILTIN(__atomic_test_and_set, "bvD*i", "n")
 BUILTIN(__atomic_clear, "vvD*i", "n")

diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 78ff18b8798e..eb05a7e910c9 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -7430,10 +7430,8 @@ def err_atomic_op_needs_trivial_copy : Error<
 def err_atomic_op_needs_atomic_int_or_ptr : Error<
   "address argument to atomic operation must be a pointer to %select{|atomic }0"
   "integer or pointer (%1 invalid)">;
-def err_atomic_op_needs_int32_or_ptr : Error<
-  "address argument to atomic operation must be a pointer to signed or unsigned 32-bit integer">;
-def err_atomic_op_bitwise_needs_atomic_int : Error<
-  "address argument to bitwise atomic operation must be a pointer to "
+def err_atomic_op_needs_atomic_int : Error<
+  "address argument to atomic operation must be a pointer to "
   "%select{|atomic }0integer (%1 invalid)">;
 def warn_atomic_op_has_invalid_memory_order : Warning<
   "memory order argument to atomic operation is invalid">,

diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index e5bb8a778c18..3f722f8fd541 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -4603,6 +4603,8 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
   case AO__c11_atomic_fetch_and:
   case AO__c11_atomic_fetch_or:
   case AO__c11_atomic_fetch_xor:
+  case AO__c11_atomic_fetch_max:
+  case AO__c11_atomic_fetch_min:
   case AO__atomic_fetch_add:
   case AO__atomic_fetch_sub:
   case AO__atomic_fetch_and:
@@ -4615,6 +4617,8 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
   case AO__atomic_or_fetch:
   case AO__atomic_xor_fetch:
   case AO__atomic_nand_fetch:
+  case AO__atomic_min_fetch:
+  case AO__atomic_max_fetch:
   case AO__atomic_fetch_min:
   case AO__atomic_fetch_max:
     return 3;

diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 505916350750..039fe6da8420 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -488,13 +488,36 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
   CGF.Builder.SetInsertPoint(ContBB);
 }
 
+/// Duplicate the atomic min/max operation in conventional IR for the builtin
+/// variants that return the new rather than the original value.
+static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
+                                         AtomicExpr::AtomicOp Op,
+                                         bool IsSigned,
+                                         llvm::Value *OldVal,
+                                         llvm::Value *RHS) {
+  llvm::CmpInst::Predicate Pred;
+  switch (Op) {
+  default:
+    llvm_unreachable("Unexpected min/max operation");
+  case AtomicExpr::AO__atomic_max_fetch:
+    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
+    break;
+  case AtomicExpr::AO__atomic_min_fetch:
+    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
+    break;
+  }
+  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
+  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
+}
+
 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                          Address Ptr, Address Val1, Address Val2,
                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
                          uint64_t Size, llvm::AtomicOrdering Order,
                          llvm::SyncScope::ID Scope) {
   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
-  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+  bool PostOpMinMax = false;
+  unsigned PostOp = 0;
 
   switch (E->getOp()) {
   case AtomicExpr::AO__c11_atomic_init:
@@ -588,12 +611,20 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
     Op = llvm::AtomicRMWInst::Sub;
     break;
 
+  case AtomicExpr::AO__atomic_min_fetch:
+    PostOpMinMax = true;
+    LLVM_FALLTHROUGH;
+  case AtomicExpr::AO__c11_atomic_fetch_min:
   case AtomicExpr::AO__opencl_atomic_fetch_min:
   case AtomicExpr::AO__atomic_fetch_min:
     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
                                                   : llvm::AtomicRMWInst::UMin;
     break;
 
+  case AtomicExpr::AO__atomic_max_fetch:
+    PostOpMinMax = true;
+    LLVM_FALLTHROUGH;
+  case AtomicExpr::AO__c11_atomic_fetch_max:
   case AtomicExpr::AO__opencl_atomic_fetch_max:
   case AtomicExpr::AO__atomic_fetch_max:
     Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
@@ -643,8 +674,13 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
   // For __atomic_*_fetch operations, perform the operation again to
   // determine the value which was written.
   llvm::Value *Result = RMWI;
-  if (PostOp)
-    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
+  if (PostOpMinMax)
+    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
+                                  E->getValueType()->isSignedIntegerType(),
+                                  RMWI, LoadVal1);
+  else if (PostOp)
+    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
+                                     LoadVal1);
   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
     Result = CGF.Builder.CreateNot(Result);
   CGF.Builder.CreateStore(Result, Dest);
@@ -853,6 +889,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__c11_atomic_fetch_and:
   case AtomicExpr::AO__c11_atomic_fetch_or:
   case AtomicExpr::AO__c11_atomic_fetch_xor:
+  case AtomicExpr::AO__c11_atomic_fetch_max:
+  case AtomicExpr::AO__c11_atomic_fetch_min:
   case AtomicExpr::AO__opencl_atomic_fetch_and:
   case AtomicExpr::AO__opencl_atomic_fetch_or:
   case AtomicExpr::AO__opencl_atomic_fetch_xor:
@@ -866,8 +904,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__atomic_or_fetch:
   case AtomicExpr::AO__atomic_xor_fetch:
   case AtomicExpr::AO__atomic_nand_fetch:
-  case AtomicExpr::AO__atomic_fetch_min:
+  case AtomicExpr::AO__atomic_max_fetch:
+  case AtomicExpr::AO__atomic_min_fetch:
   case AtomicExpr::AO__atomic_fetch_max:
+  case AtomicExpr::AO__atomic_fetch_min:
     Val1 = EmitValToTemp(*this, E->getVal1());
     break;
   }
@@ -916,14 +956,18 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     case AtomicExpr::AO__opencl_atomic_fetch_min:
     case AtomicExpr::AO__opencl_atomic_fetch_max:
     case AtomicExpr::AO__atomic_fetch_xor:
+    case AtomicExpr::AO__c11_atomic_fetch_max:
+    case AtomicExpr::AO__c11_atomic_fetch_min:
     case AtomicExpr::AO__atomic_add_fetch:
     case AtomicExpr::AO__atomic_and_fetch:
     case AtomicExpr::AO__atomic_nand_fetch:
     case AtomicExpr::AO__atomic_or_fetch:
     case AtomicExpr::AO__atomic_sub_fetch:
     case AtomicExpr::AO__atomic_xor_fetch:
-    case AtomicExpr::AO__atomic_fetch_min:
     case AtomicExpr::AO__atomic_fetch_max:
+    case AtomicExpr::AO__atomic_fetch_min:
+    case AtomicExpr::AO__atomic_max_fetch:
+    case AtomicExpr::AO__atomic_min_fetch:
       // For these, only library calls for certain sizes exist.
       UseOptimizedLibcall = true;
       break;
@@ -991,6 +1035,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     QualType RetTy;
     bool HaveRetTy = false;
     llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+    bool PostOpMinMax = false;
     switch (E->getOp()) {
     case AtomicExpr::AO__c11_atomic_init:
     case AtomicExpr::AO__opencl_atomic_init:
@@ -1112,6 +1157,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                         MemTy, E->getExprLoc(), sizeChars);
       break;
+    case AtomicExpr::AO__atomic_min_fetch:
+      PostOpMinMax = true;
+      LLVM_FALLTHROUGH;
+    case AtomicExpr::AO__c11_atomic_fetch_min:
     case AtomicExpr::AO__atomic_fetch_min:
     case AtomicExpr::AO__opencl_atomic_fetch_min:
       LibCallName = E->getValueType()->isSignedIntegerType()
@@ -1120,6 +1169,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                         LoweredMemTy, E->getExprLoc(), sizeChars);
       break;
+    case AtomicExpr::AO__atomic_max_fetch:
+      PostOpMinMax = true;
+      LLVM_FALLTHROUGH;
+    case AtomicExpr::AO__c11_atomic_fetch_max:
     case AtomicExpr::AO__atomic_fetch_max:
     case AtomicExpr::AO__opencl_atomic_fetch_max:
       LibCallName = E->getValueType()->isSignedIntegerType()
@@ -1171,7 +1224,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     // PostOp is only needed for the atomic_*_fetch operations, and
     // thus is only needed for and implemented in the
     // UseOptimizedLibcall codepath.
-    assert(UseOptimizedLibcall || !PostOp);
+    assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
 
     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
     // The value is returned directly from the libcall.
@@ -1182,7 +1235,12 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     // provided an out-param.
     if (UseOptimizedLibcall && Res.getScalarVal()) {
       llvm::Value *ResVal = Res.getScalarVal();
-      if (PostOp) {
+      if (PostOpMinMax) {
+        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
+        ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
+                                      E->getValueType()->isSignedIntegerType(),
+                                      ResVal, LoadVal1);
+      } else if (PostOp) {
         llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
       }

diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index a4218add4e71..c19badf80137 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -4572,20 +4572,19 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
       && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
       "need to update code for modified forms");
   static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
-                    AtomicExpr::AO__c11_atomic_fetch_xor + 1 ==
+                    AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
                         AtomicExpr::AO__atomic_load,
                 "need to update code for modified C11 atomics");
   bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
                   Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
   bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
-               Op <= AtomicExpr::AO__c11_atomic_fetch_xor) ||
+               Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
                IsOpenCL;
   bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
              Op == AtomicExpr::AO__atomic_store_n ||
              Op == AtomicExpr::AO__atomic_exchange_n ||
              Op == AtomicExpr::AO__atomic_compare_exchange_n;
   bool IsAddSub = false;
-  bool IsMinMax = false;
 
   switch (Op) {
   case AtomicExpr::AO__c11_atomic_init:
@@ -4636,12 +4635,12 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
   case AtomicExpr::AO__atomic_or_fetch:
   case AtomicExpr::AO__atomic_xor_fetch:
   case AtomicExpr::AO__atomic_nand_fetch:
-    Form = Arithmetic;
-    break;
-
+  case AtomicExpr::AO__c11_atomic_fetch_min:
+  case AtomicExpr::AO__c11_atomic_fetch_max:
+  case AtomicExpr::AO__atomic_min_fetch:
+  case AtomicExpr::AO__atomic_max_fetch:
   case AtomicExpr::AO__atomic_fetch_min:
   case AtomicExpr::AO__atomic_fetch_max:
-    IsMinMax = true;
     Form = Arithmetic;
     break;
 
@@ -4733,16 +4732,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
           << IsC11 << Ptr->getType() << Ptr->getSourceRange();
       return ExprError();
     }
-    if (IsMinMax) {
-      const BuiltinType *BT = ValType->getAs<BuiltinType>();
-      if (!BT || (BT->getKind() != BuiltinType::Int &&
-                  BT->getKind() != BuiltinType::UInt)) {
-        Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_int32_or_ptr);
-        return ExprError();
-      }
-    }
-    if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) {
-      Diag(ExprRange.getBegin(), diag::err_atomic_op_bitwise_needs_atomic_int)
+    if (!IsAddSub && !ValType->isIntegerType()) {
+      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int)
           << IsC11 << Ptr->getType() << Ptr->getSourceRange();
       return ExprError();
     }

diff --git a/clang/test/CodeGen/atomic-ops.c b/clang/test/CodeGen/atomic-ops.c
index c3a9729d4901..25ecb4328876 100644
--- a/clang/test/CodeGen/atomic-ops.c
+++ b/clang/test/CodeGen/atomic-ops.c
@@ -661,4 +661,81 @@ void test_underaligned() {
   __atomic_compare_exchange(&aligned_a, &aligned_b, &aligned_c, 1, memory_order_seq_cst, memory_order_seq_cst);
 }
 
+void test_c11_minmax(_Atomic(int) * si, _Atomic(unsigned) * ui, _Atomic(short) * ss, _Atomic(unsigned char) * uc, _Atomic(long long) * sll) {
+  // CHECK-LABEL: @test_c11_minmax
+
+  // CHECK: atomicrmw max i32
+  *si = __c11_atomic_fetch_max(si, 42, memory_order_acquire);
+  // CHECK: atomicrmw min i32
+  *si = __c11_atomic_fetch_min(si, 42, memory_order_acquire);
+  // CHECK: atomicrmw umax i32
+  *ui = __c11_atomic_fetch_max(ui, 42, memory_order_acquire);
+  // CHECK: atomicrmw umin i32
+  *ui = __c11_atomic_fetch_min(ui, 42, memory_order_acquire);
+
+  // CHECK: atomicrmw max i16
+  *ss = __c11_atomic_fetch_max(ss, 42, memory_order_acquire);
+  // CHECK: atomicrmw min i16
+  *ss = __c11_atomic_fetch_min(ss, 42, memory_order_acquire);
+
+  // CHECK: atomicrmw umax i8
+  *uc = __c11_atomic_fetch_max(uc, 42, memory_order_acquire);
+  // CHECK: atomicrmw umin i8
+  *uc = __c11_atomic_fetch_min(uc, 42, memory_order_acquire);
+
+  // CHECK: atomicrmw max i64
+  *sll = __c11_atomic_fetch_max(sll, 42, memory_order_acquire);
+  // CHECK: atomicrmw min i64
+  *sll = __c11_atomic_fetch_min(sll, 42, memory_order_acquire);
+
+}
+
+void test_minmax_postop(int *si, unsigned *ui, unsigned short *us, signed char *sc, unsigned long long *ull) {
+  int val = 42;
+  // CHECK-LABEL: @test_minmax_postop
+
+  // CHECK: [[OLD:%.*]] = atomicrmw max i32* [[PTR:%.*]], i32 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp sgt i32 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
+  // CHECK: store i32 [[NEW]], i32*
+  *si = __atomic_max_fetch(si, 42, memory_order_release);
+
+  // CHECK: [[OLD:%.*]] = atomicrmw min i32* [[PTR:%.*]], i32 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp slt i32 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
+  // CHECK: store i32 [[NEW]], i32*
+  *si = __atomic_min_fetch(si, 42, memory_order_release);
+  
+  // CHECK: [[OLD:%.*]] = atomicrmw umax i32* [[PTR:%.*]], i32 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp ugt i32 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
+  // CHECK: store i32 [[NEW]], i32*
+  *ui = __atomic_max_fetch(ui, 42, memory_order_release);
+
+  // CHECK: [[OLD:%.*]] = atomicrmw umin i32* [[PTR:%.*]], i32 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp ult i32 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
+  // CHECK: store i32 [[NEW]], i32*
+  *ui = __atomic_min_fetch(ui, 42, memory_order_release);
+
+  // CHECK: [[OLD:%.*]] = atomicrmw umin i16* [[PTR:%.*]], i16 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp ult i16 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i16 [[OLD]], i16 [[RHS]]
+  // CHECK: store i16 [[NEW]], i16*
+  *us = __atomic_min_fetch(us, 42, memory_order_release);
+
+  // CHECK: [[OLD:%.*]] = atomicrmw min i8* [[PTR:%.*]], i8 [[RHS:%.*]] release
+  // CHECK: [[TST:%.*]] = icmp slt i8 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i8 [[OLD]], i8 [[RHS]]
+  // CHECK: store i8 [[NEW]], i8*
+  *sc = __atomic_min_fetch(sc, 42, memory_order_release);
+
+  // CHECK: [[OLD:%.*]] = call i64 @__atomic_fetch_umin_8(i8* {{%.*}}, i64 [[RHS:%.*]],
+  // CHECK: [[TST:%.*]] = icmp ult i64 [[OLD]], [[RHS]]
+  // CHECK: [[NEW:%.*]] = select i1 [[TST]], i64 [[OLD]], i64 [[RHS]]
+  // CHECK: store i64 [[NEW]], i64*
+  *ull = __atomic_min_fetch(ull, 42, memory_order_release);
+
+}
+
 #endif

diff --git a/clang/test/Sema/atomic-ops.c b/clang/test/Sema/atomic-ops.c
index 0340e2334894..cc9c6d9eb978 100644
--- a/clang/test/Sema/atomic-ops.c
+++ b/clang/test/Sema/atomic-ops.c
@@ -173,8 +173,8 @@ void f(_Atomic(int) *i, const _Atomic(int) *ci,
   __atomic_fetch_sub(P, 3, memory_order_seq_cst);
   __atomic_fetch_sub(D, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer or pointer}}
   __atomic_fetch_sub(s1, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer or pointer}}
-  __atomic_fetch_min(D, 3, memory_order_seq_cst); // expected-error {{must be a pointer to signed or unsigned 32-bit integer}}
-  __atomic_fetch_max(P, 3, memory_order_seq_cst); // expected-error {{must be a pointer to signed or unsigned 32-bit integer}}
+  __atomic_fetch_min(D, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer}}
+  __atomic_fetch_max(P, 3, memory_order_seq_cst); // expected-error {{must be a pointer to integer}}
   __atomic_fetch_max(p, 3);                       // expected-error {{too few arguments to function call, expected 3, have 2}}
 
   __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
@@ -354,6 +354,20 @@ void memory_checks(_Atomic(int) *Ap, int *p, int val) {
   (void)__c11_atomic_fetch_xor(Ap, val, memory_order_acq_rel);
   (void)__c11_atomic_fetch_xor(Ap, val, memory_order_seq_cst);
 
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_relaxed);
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_acquire);
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_consume);
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_release);
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_acq_rel);
+  (void)__c11_atomic_fetch_min(Ap, val, memory_order_seq_cst);
+
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_relaxed);
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_acquire);
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_consume);
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_release);
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_acq_rel);
+  (void)__c11_atomic_fetch_max(Ap, val, memory_order_seq_cst);
+
   (void)__c11_atomic_exchange(Ap, val, memory_order_relaxed);
   (void)__c11_atomic_exchange(Ap, val, memory_order_acquire);
   (void)__c11_atomic_exchange(Ap, val, memory_order_consume);
@@ -501,6 +515,20 @@ void memory_checks(_Atomic(int) *Ap, int *p, int val) {
   (void)__atomic_nand_fetch(p, val, memory_order_acq_rel);
   (void)__atomic_nand_fetch(p, val, memory_order_seq_cst);
 
+  (void)__atomic_max_fetch(p, val, memory_order_relaxed);
+  (void)__atomic_max_fetch(p, val, memory_order_acquire);
+  (void)__atomic_max_fetch(p, val, memory_order_consume);
+  (void)__atomic_max_fetch(p, val, memory_order_release);
+  (void)__atomic_max_fetch(p, val, memory_order_acq_rel);
+  (void)__atomic_max_fetch(p, val, memory_order_seq_cst);
+
+  (void)__atomic_min_fetch(p, val, memory_order_relaxed);
+  (void)__atomic_min_fetch(p, val, memory_order_acquire);
+  (void)__atomic_min_fetch(p, val, memory_order_consume);
+  (void)__atomic_min_fetch(p, val, memory_order_release);
+  (void)__atomic_min_fetch(p, val, memory_order_acq_rel);
+  (void)__atomic_min_fetch(p, val, memory_order_seq_cst);
+
   (void)__atomic_exchange_n(p, val, memory_order_relaxed);
   (void)__atomic_exchange_n(p, val, memory_order_acquire);
   (void)__atomic_exchange_n(p, val, memory_order_consume);

diff --git a/clang/test/SemaOpenCL/atomic-ops.cl b/clang/test/SemaOpenCL/atomic-ops.cl
index 540520d4e325..d8021e33af65 100644
--- a/clang/test/SemaOpenCL/atomic-ops.cl
+++ b/clang/test/SemaOpenCL/atomic-ops.cl
@@ -73,7 +73,7 @@ void f(atomic_int *i, const atomic_int *ci,
   __opencl_atomic_fetch_add(d, 1, memory_order_seq_cst, memory_scope_work_group); // expected-error {{address argument to atomic operation must be a pointer to atomic integer or pointer ('__generic atomic_float *' (aka '__generic _Atomic(float) *') invalid)}}
   __opencl_atomic_fetch_and(i, 1, memory_order_seq_cst, memory_scope_work_group);
   __opencl_atomic_fetch_and(p, 1, memory_order_seq_cst, memory_scope_work_group);
-  __opencl_atomic_fetch_and(d, 1, memory_order_seq_cst, memory_scope_work_group); // expected-error {{address argument to bitwise atomic operation must be a pointer to atomic integer ('__generic atomic_float *' (aka '__generic _Atomic(float) *') invalid)}}
+  __opencl_atomic_fetch_and(d, 1, memory_order_seq_cst, memory_scope_work_group); // expected-error {{address argument to atomic operation must be a pointer to atomic integer ('__generic atomic_float *' (aka '__generic _Atomic(float) *') invalid)}}
 
   __opencl_atomic_fetch_min(i, 1, memory_order_seq_cst, memory_scope_work_group);
   __opencl_atomic_fetch_max(i, 1, memory_order_seq_cst, memory_scope_work_group);


        

