r281378 - Add a bunch of _Interlocked builtins

Albert Gutowski via cfe-commits cfe-commits at lists.llvm.org
Tue Sep 13 12:43:33 PDT 2016


Author: agutowski
Date: Tue Sep 13 14:43:33 2016
New Revision: 281378

URL: http://llvm.org/viewvc/llvm-project?rev=281378&view=rev
Log:
Add a bunch of _Interlocked builtins (And, Or, Xor, Exchange, ExchangeAdd,
ExchangeSub, Increment, Decrement, and CompareExchange variants across the
8-, 16-, 32-, and 64-bit widths MSVC defines), lower them in CGBuiltin.cpp,
and drop the now-redundant inline definitions from intrin.h. The
increment/decrement atomicrmw is also no longer marked volatile.

Reviewers: compnerd, thakis, Prazek, majnemer, rnk

Subscribers: cfe-commits

Differential Revision: https://reviews.llvm.org/D24153
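
For context, these are MSVC-compatible interlocked intrinsics; each atomically
updates its target and returns the value the target held before the update
(except Increment/Decrement, which return the new value). A minimal usage
sketch, with hypothetical names and assuming an MSVC-compatible target where
intrin.h is usable:

  #include <intrin.h>

  /* atomically set bit 0 of *flag; _InterlockedOr returns the
     previous value, so a zero bit means this caller set it first */
  static int claim(long volatile *flag) {
    return (_InterlockedOr(flag, 1) & 1) == 0;
  }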

Modified:
    cfe/trunk/include/clang/Basic/Builtins.def
    cfe/trunk/lib/CodeGen/CGBuiltin.cpp
    cfe/trunk/lib/Headers/intrin.h
    cfe/trunk/test/CodeGen/ms-intrinsics.c
    cfe/trunk/test/CodeGen/pr27892.c

Modified: cfe/trunk/include/clang/Basic/Builtins.def
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/Builtins.def?rev=281378&r1=281377&r2=281378&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/Builtins.def (original)
+++ cfe/trunk/include/clang/Basic/Builtins.def Tue Sep 13 14:43:33 2016
@@ -720,13 +720,42 @@ LANGBUILTIN(_exception_info,  "v*",  "n"
 LANGBUILTIN(__abnormal_termination, "i", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(_abnormal_termination,  "i", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(__GetExceptionInfo, "v*.", "ntu", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedCompareExchange, "LiLiD*LiLi", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedAnd8,   "ccD*c",        "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedAnd16,  "ssD*s",        "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedAnd,    "LiLiD*Li",     "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedAnd64,  "LLiLLiD*LLi",  "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedCompareExchange8,   "ccD*cc",         "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedCompareExchange16,  "ssD*ss",         "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedCompareExchange,    "LiLiD*LiLi",     "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedCompareExchange64,  "LLiLLiD*LLiLLi", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(_InterlockedCompareExchangePointer, "v*v*D*v*v*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedDecrement,       "LiLiD*",     "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedExchangeAdd,     "LiLiD*Li",   "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedExchange,        "LiLiD*Li",   "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedExchangePointer, "v*v*D*v*",   "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedIncrement,       "LiLiD*",     "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedDecrement16,        "ssD*",     "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedDecrement,          "LiLiD*",   "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedDecrement64,        "LLiLLiD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchange,           "LiLiD*Li",     "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchange8,          "ccD*c",        "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchange16,         "ssD*s",        "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchange64,         "LLiLLiD*LLi",  "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeAdd8,       "ccD*c",          "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeAdd16,      "ssD*s",          "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeAdd,        "LiLiD*Li",       "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeAdd64,      "LLiLLiD*LLi",    "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangePointer,    "v*v*D*v*",   "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeSub8,   "ccD*c",        "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeSub16,  "ssD*s",        "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeSub,    "LiLiD*Li",     "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeSub64,  "LLiLLiD*LLi",  "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedIncrement16,        "ssD*",     "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedIncrement,          "LiLiD*",   "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedIncrement64,        "LLiLLiD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedOr8,  "ccD*c",        "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedOr16, "ssD*s",        "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedOr,   "LiLiD*Li",     "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedOr64, "LLiLLiD*LLi",  "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedXor8,  "ccD*c",       "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedXor16, "ssD*s",       "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedXor,   "LiLiD*Li",    "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedXor64, "LLiLLiD*LLi", "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(__noop,           "i.",  "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(__popcnt16, "UsUs",     "nc", ALL_MS_LANGUAGES)
 LANGBUILTIN(__popcnt,   "UiUi",     "nc", ALL_MS_LANGUAGES)
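
For readers new to Builtins.def: in the signature strings above, the first
type is the return type and the rest are parameters; "c", "s", "Li", and
"LLi" stand for char, short, long, and long long, "D" marks the preceding
type volatile-qualified, and "*" then makes it a pointer. So "LLiLLiD*LLi"
decodes to roughly:

  __int64 _InterlockedAnd64(__int64 volatile *Value, __int64 Mask);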

Modified: cfe/trunk/lib/CodeGen/CGBuiltin.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGBuiltin.cpp?rev=281378&r1=281377&r2=281378&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGBuiltin.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGBuiltin.cpp Tue Sep 13 14:43:33 2016
@@ -1963,7 +1963,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(
                     EmitScalarExpr(Call->getCallee()), Call, ReturnValue,
                     Call->getCalleeDecl(), EmitScalarExpr(Chain));
   }
+  case Builtin::BI_InterlockedExchange8:
+  case Builtin::BI_InterlockedExchange16:
   case Builtin::BI_InterlockedExchange:
+  case Builtin::BI_InterlockedExchange64:
   case Builtin::BI_InterlockedExchangePointer:
     return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
   case Builtin::BI_InterlockedCompareExchangePointer: {
@@ -1993,7 +1996,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(
                                                                          0),
                                               RTy));
   }
-  case Builtin::BI_InterlockedCompareExchange: {
+  case Builtin::BI_InterlockedCompareExchange8:
+  case Builtin::BI_InterlockedCompareExchange16:
+  case Builtin::BI_InterlockedCompareExchange:
+  case Builtin::BI_InterlockedCompareExchange64: {
     AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
         EmitScalarExpr(E->getArg(0)),
         EmitScalarExpr(E->getArg(2)),
@@ -2003,35 +2009,53 @@ RValue CodeGenFunction::EmitBuiltinExpr(
       CXI->setVolatile(true);
       return RValue::get(Builder.CreateExtractValue(CXI, 0));
   }
-  case Builtin::BI_InterlockedIncrement: {
+  case Builtin::BI_InterlockedIncrement16:
+  case Builtin::BI_InterlockedIncrement:
+  case Builtin::BI_InterlockedIncrement64: {
     llvm::Type *IntTy = ConvertType(E->getType());
     AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
       AtomicRMWInst::Add,
       EmitScalarExpr(E->getArg(0)),
       ConstantInt::get(IntTy, 1),
       llvm::AtomicOrdering::SequentiallyConsistent);
-    RMWI->setVolatile(true);
     return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1)));
   }
-  case Builtin::BI_InterlockedDecrement: {
+  case Builtin::BI_InterlockedDecrement16:
+  case Builtin::BI_InterlockedDecrement:
+  case Builtin::BI_InterlockedDecrement64: {
     llvm::Type *IntTy = ConvertType(E->getType());
     AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
       AtomicRMWInst::Sub,
       EmitScalarExpr(E->getArg(0)),
       ConstantInt::get(IntTy, 1),
       llvm::AtomicOrdering::SequentiallyConsistent);
-    RMWI->setVolatile(true);
     return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1)));
   }
-  case Builtin::BI_InterlockedExchangeAdd: {
-    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
-      AtomicRMWInst::Add,
-      EmitScalarExpr(E->getArg(0)),
-      EmitScalarExpr(E->getArg(1)),
-      llvm::AtomicOrdering::SequentiallyConsistent);
-    RMWI->setVolatile(true);
-    return RValue::get(RMWI);
-  }
+  case Builtin::BI_InterlockedAnd8:
+  case Builtin::BI_InterlockedAnd16:
+  case Builtin::BI_InterlockedAnd:
+  case Builtin::BI_InterlockedAnd64:
+    return EmitBinaryAtomic(*this, AtomicRMWInst::And, E);
+  case Builtin::BI_InterlockedExchangeAdd8:
+  case Builtin::BI_InterlockedExchangeAdd16:
+  case Builtin::BI_InterlockedExchangeAdd:
+  case Builtin::BI_InterlockedExchangeAdd64:
+    return EmitBinaryAtomic(*this, AtomicRMWInst::Add, E);
+  case Builtin::BI_InterlockedExchangeSub8:
+  case Builtin::BI_InterlockedExchangeSub16:
+  case Builtin::BI_InterlockedExchangeSub:
+  case Builtin::BI_InterlockedExchangeSub64:
+    return EmitBinaryAtomic(*this, AtomicRMWInst::Sub, E);
+  case Builtin::BI_InterlockedOr8:
+  case Builtin::BI_InterlockedOr16:
+  case Builtin::BI_InterlockedOr:
+  case Builtin::BI_InterlockedOr64:
+    return EmitBinaryAtomic(*this, AtomicRMWInst::Or, E);
+  case Builtin::BI_InterlockedXor8:
+  case Builtin::BI_InterlockedXor16:
+  case Builtin::BI_InterlockedXor:
+  case Builtin::BI_InterlockedXor64:
+    return EmitBinaryAtomic(*this, AtomicRMWInst::Xor, E);
   case Builtin::BI__readfsdword: {
     llvm::Type *IntTy = ConvertType(E->getType());
     Value *IntToPtr =
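
All of the new And/Or/Xor/ExchangeAdd/ExchangeSub cases above funnel into
EmitBinaryAtomic, which emits a seq_cst atomicrmw and yields the value the
memory held before the operation; the Increment/Decrement cases additionally
adjust that old value so the builtin returns the new one, per MSVC semantics.
A rough C-level equivalent of the increment lowering (a sketch, not the
actual codegen):

  /* __atomic_fetch_add returns the old value, so add 1 to match
     _InterlockedIncrement's return-the-new-value contract */
  long increment_equiv(long volatile *Addend) {
    return __atomic_fetch_add(Addend, 1, __ATOMIC_SEQ_CST) + 1;
  }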

Modified: cfe/trunk/lib/Headers/intrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/intrin.h?rev=281378&r1=281377&r2=281378&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/intrin.h (original)
+++ cfe/trunk/lib/Headers/intrin.h Tue Sep 13 14:43:33 2016
@@ -535,177 +535,6 @@ _interlockedbittestandset64(__int64 vola
 }
 #endif
 /*----------------------------------------------------------------------------*\
-|* Interlocked Exchange Add
-\*----------------------------------------------------------------------------*/
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd8(char volatile *_Addend, char _Value) {
-  return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
-  return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
-}
-#ifdef __x86_64__
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
-  return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange Sub
-\*----------------------------------------------------------------------------*/
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchangeSub8(char volatile *_Subend, char _Value) {
-  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchangeSub16(short volatile *_Subend, short _Value) {
-  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchangeSub(long volatile *_Subend, long _Value) {
-  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
-}
-#ifdef __x86_64__
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
-  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Increment
-\*----------------------------------------------------------------------------*/
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedIncrement16(short volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
-}
-#ifdef __x86_64__
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64(__int64 volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Decrement
-\*----------------------------------------------------------------------------*/
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedDecrement16(short volatile *_Value) {
-  return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
-}
-#ifdef __x86_64__
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedDecrement64(__int64 volatile *_Value) {
-  return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked And
-\*----------------------------------------------------------------------------*/
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedAnd8(char volatile *_Value, char _Mask) {
-  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedAnd16(short volatile *_Value, short _Mask) {
-  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedAnd(long volatile *_Value, long _Mask) {
-  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#ifdef __x86_64__
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
-  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Or
-\*----------------------------------------------------------------------------*/
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedOr8(char volatile *_Value, char _Mask) {
-  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedOr16(short volatile *_Value, short _Mask) {
-  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedOr(long volatile *_Value, long _Mask) {
-  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#ifdef __x86_64__
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
-  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Xor
-\*----------------------------------------------------------------------------*/
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedXor8(char volatile *_Value, char _Mask) {
-  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedXor16(short volatile *_Value, short _Mask) {
-  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedXor(long volatile *_Value, long _Mask) {
-  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#ifdef __x86_64__
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
-  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange
-\*----------------------------------------------------------------------------*/
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchange8(char volatile *_Target, char _Value) {
-  __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
-  return _Value;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchange16(short volatile *_Target, short _Value) {
-  __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
-  return _Value;
-}
-#ifdef __x86_64__
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
-  __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
-  return _Value;
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Compare Exchange
-\*----------------------------------------------------------------------------*/
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8(char volatile *_Destination,
-                             char _Exchange, char _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-  return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16(short volatile *_Destination,
-                              short _Exchange, short _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-  return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
-                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-  return _Comparand;
-}
-/*----------------------------------------------------------------------------*\
 |* Barriers
 \*----------------------------------------------------------------------------*/
 static __inline__ void __DEFAULT_FN_ATTRS
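
The inline definitions removed above are superseded by the new builtins, so
code that includes intrin.h keeps compiling and the calls now lower directly
to atomics. A sketch of a caller that is unaffected by the removal:

  #include <intrin.h>

  /* resolves to the compiler builtin rather than the deleted header
     inline; lowers to a single seq_cst atomicrmw add */
  char bump(char volatile *p) {
    return _InterlockedExchangeAdd8(p, 1);
  }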

Modified: cfe/trunk/test/CodeGen/ms-intrinsics.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/ms-intrinsics.c?rev=281378&r1=281377&r2=281378&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/ms-intrinsics.c (original)
+++ cfe/trunk/test/CodeGen/ms-intrinsics.c Tue Sep 13 14:43:33 2016
@@ -41,15 +41,6 @@ void *test_InterlockedCompareExchangePoi
 // CHECK:   ret i8* %[[RESULT:[0-9]+]]
 // CHECK: }
 
-long test_InterlockedExchange(long *Target, long Value) {
-  return _InterlockedExchange(Target, Value);
-}
-
-// CHECK: define{{.*}}i32 @test_InterlockedExchange(i32* {{[a-z_ ]*}}%Target, i32 %Value){{.*}}{
-// CHECK:   %[[EXCHANGE:[0-9]+]] = atomicrmw xchg i32* %Target, i32 %Value seq_cst
-// CHECK:   ret i32 %[[EXCHANGE:[0-9]+]]
-// CHECK: }
-
 #if defined(__i386__)
 long test__readfsdword(unsigned long Offset) {
   return __readfsdword(Offset);
@@ -71,3 +62,284 @@ unsigned __int64 test__umulh(unsigned __
 
 #endif
 
+char test_InterlockedExchange8(char volatile *value, char mask) {
+  return _InterlockedExchange8(value, mask);
+}
+// CHECK: define{{.*}}i8 @test_InterlockedExchange8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask seq_cst
+// CHECK:   ret i8 [[RESULT]]
+// CHECK: }
+
+short test_InterlockedExchange16(short volatile *value, short mask) {
+  return _InterlockedExchange16(value, mask);
+}
+// CHECK: define{{.*}}i16 @test_InterlockedExchange16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask seq_cst
+// CHECK:   ret i16 [[RESULT]]
+// CHECK: }
+
+long test_InterlockedExchange(long volatile *value, long mask) {
+  return _InterlockedExchange(value, mask);
+}
+// CHECK: define{{.*}}i32 @test_InterlockedExchange(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask seq_cst
+// CHECK:   ret i32 [[RESULT]]
+// CHECK: }
+
+__int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedExchange64(value, mask);
+}
+// CHECK: define{{.*}}i64 @test_InterlockedExchange64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask seq_cst
+// CHECK:   ret i64 [[RESULT]]
+// CHECK: }
+
+char test_InterlockedExchangeAdd8(char volatile *value, char mask) {
+  return _InterlockedExchangeAdd8(value, mask);
+}
+// CHECK: define{{.*}}i8 @test_InterlockedExchangeAdd8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask seq_cst
+// CHECK:   ret i8 [[RESULT]]
+// CHECK: }
+
+short test_InterlockedExchangeAdd16(short volatile *value, short mask) {
+  return _InterlockedExchangeAdd16(value, mask);
+}
+// CHECK: define{{.*}}i16 @test_InterlockedExchangeAdd16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask seq_cst
+// CHECK:   ret i16 [[RESULT]]
+// CHECK: }
+
+long test_InterlockedExchangeAdd(long volatile *value, long mask) {
+  return _InterlockedExchangeAdd(value, mask);
+}
+// CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask seq_cst
+// CHECK:   ret i32 [[RESULT]]
+// CHECK: }
+
+__int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedExchangeAdd64(value, mask);
+}
+// CHECK: define{{.*}}i64 @test_InterlockedExchangeAdd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask seq_cst
+// CHECK:   ret i64 [[RESULT]]
+// CHECK: }
+
+char test_InterlockedExchangeSub8(char volatile *value, char mask) {
+  return _InterlockedExchangeSub8(value, mask);
+}
+// CHECK: define{{.*}}i8 @test_InterlockedExchangeSub8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub i8* %value, i8 %mask seq_cst
+// CHECK:   ret i8 [[RESULT]]
+// CHECK: }
+
+short test_InterlockedExchangeSub16(short volatile *value, short mask) {
+  return _InterlockedExchangeSub16(value, mask);
+}
+// CHECK: define{{.*}}i16 @test_InterlockedExchangeSub16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub i16* %value, i16 %mask seq_cst
+// CHECK:   ret i16 [[RESULT]]
+// CHECK: }
+
+long test_InterlockedExchangeSub(long volatile *value, long mask) {
+  return _InterlockedExchangeSub(value, mask);
+}
+// CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub i32* %value, i32 %mask seq_cst
+// CHECK:   ret i32 [[RESULT]]
+// CHECK: }
+
+__int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedExchangeSub64(value, mask);
+}
+// CHECK: define{{.*}}i64 @test_InterlockedExchangeSub64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub i64* %value, i64 %mask seq_cst
+// CHECK:   ret i64 [[RESULT]]
+// CHECK: }
+
+char test_InterlockedOr8(char volatile *value, char mask) {
+  return _InterlockedOr8(value, mask);
+}
+// CHECK: define{{.*}}i8 @test_InterlockedOr8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask seq_cst
+// CHECK:   ret i8 [[RESULT]]
+// CHECK: }
+
+short test_InterlockedOr16(short volatile *value, short mask) {
+  return _InterlockedOr16(value, mask);
+}
+// CHECK: define{{.*}}i16 @test_InterlockedOr16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask seq_cst
+// CHECK:   ret i16 [[RESULT]]
+// CHECK: }
+
+long test_InterlockedOr(long volatile *value, long mask) {
+  return _InterlockedOr(value, mask);
+}
+// CHECK: define{{.*}}i32 @test_InterlockedOr(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask seq_cst
+// CHECK:   ret i32 [[RESULT]]
+// CHECK: }
+
+__int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedOr64(value, mask);
+}
+// CHECK: define{{.*}}i64 @test_InterlockedOr64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask seq_cst
+// CHECK:   ret i64 [[RESULT]]
+// CHECK: }
+
+char test_InterlockedXor8(char volatile *value, char mask) {
+  return _InterlockedXor8(value, mask);
+}
+// CHECK: define{{.*}}i8 @test_InterlockedXor8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask seq_cst
+// CHECK:   ret i8 [[RESULT]]
+// CHECK: }
+
+short test_InterlockedXor16(short volatile *value, short mask) {
+  return _InterlockedXor16(value, mask);
+}
+// CHECK: define{{.*}}i16 @test_InterlockedXor16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask seq_cst
+// CHECK:   ret i16 [[RESULT]]
+// CHECK: }
+
+long test_InterlockedXor(long volatile *value, long mask) {
+  return _InterlockedXor(value, mask);
+}
+// CHECK: define{{.*}}i32 @test_InterlockedXor(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask seq_cst
+// CHECK:   ret i32 [[RESULT]]
+// CHECK: }
+
+__int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedXor64(value, mask);
+}
+// CHECK: define{{.*}}i64 @test_InterlockedXor64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask seq_cst
+// CHECK:   ret i64 [[RESULT]]
+// CHECK: }
+
+char test_InterlockedAnd8(char volatile *value, char mask) {
+  return _InterlockedAnd8(value, mask);
+}
+// CHECK: define{{.*}}i8 @test_InterlockedAnd8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask seq_cst
+// CHECK:   ret i8 [[RESULT]]
+// CHECK: }
+
+short test_InterlockedAnd16(short volatile *value, short mask) {
+  return _InterlockedAnd16(value, mask);
+}
+// CHECK: define{{.*}}i16 @test_InterlockedAnd16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask seq_cst
+// CHECK:   ret i16 [[RESULT]]
+// CHECK: }
+
+long test_InterlockedAnd(long volatile *value, long mask) {
+  return _InterlockedAnd(value, mask);
+}
+// CHECK: define{{.*}}i32 @test_InterlockedAnd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask seq_cst
+// CHECK:   ret i32 [[RESULT]]
+// CHECK: }
+
+__int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedAnd64(value, mask);
+}
+// CHECK: define{{.*}}i64 @test_InterlockedAnd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask seq_cst
+// CHECK:   ret i64 [[RESULT]]
+// CHECK: }
+
+char test_InterlockedCompareExchange8(char volatile *Destination, char Exchange, char Comperand) {
+  return _InterlockedCompareExchange8(Destination, Exchange, Comperand);
+}
+// CHECK: define{{.*}}i8 @test_InterlockedCompareExchange8(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange seq_cst seq_cst
+// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
+// CHECK: ret i8 [[RESULT]]
+// CHECK: }
+
+short test_InterlockedCompareExchange16(short volatile *Destination, short Exchange, short Comperand) {
+  return _InterlockedCompareExchange16(Destination, Exchange, Comperand);
+}
+// CHECK: define{{.*}}i16 @test_InterlockedCompareExchange16(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange seq_cst seq_cst
+// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
+// CHECK: ret i16 [[RESULT]]
+// CHECK: }
+
+long test_InterlockedCompareExchange(long volatile *Destination, long Exchange, long Comperand) {
+  return _InterlockedCompareExchange(Destination, Exchange, Comperand);
+}
+// CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst
+// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
+// CHECK: ret i32 [[RESULT]]
+// CHECK: }
+
+__int64 test_InterlockedCompareExchange64(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
+  return _InterlockedCompareExchange64(Destination, Exchange, Comperand);
+}
+// CHECK: define{{.*}}i64 @test_InterlockedCompareExchange64(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange seq_cst seq_cst
+// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
+// CHECK: ret i64 [[RESULT]]
+// CHECK: }
+
+short test_InterlockedIncrement16(short volatile *Addend) {
+  return _InterlockedIncrement16(Addend);
+}
+// CHECK: define{{.*}}i16 @test_InterlockedIncrement16(i16*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 seq_cst
+// CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
+// CHECK: ret i16 [[RESULT]]
+// CHECK: }
+
+long test_InterlockedIncrement(long volatile *Addend) {
+  return _InterlockedIncrement(Addend);
+}
+// CHECK: define{{.*}}i32 @test_InterlockedIncrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 seq_cst
+// CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
+// CHECK: ret i32 [[RESULT]]
+// CHECK: }
+
+__int64 test_InterlockedIncrement64(__int64 volatile *Addend) {
+  return _InterlockedIncrement64(Addend);
+}
+// CHECK: define{{.*}}i64 @test_InterlockedIncrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 seq_cst
+// CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
+// CHECK: ret i64 [[RESULT]]
+// CHECK: }
+
+short test_InterlockedDecrement16(short volatile *Addend) {
+  return _InterlockedDecrement16(Addend);
+}
+// CHECK: define{{.*}}i16 @test_InterlockedDecrement16(i16*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 seq_cst
+// CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
+// CHECK: ret i16 [[RESULT]]
+// CHECK: }
+
+long test_InterlockedDecrement(long volatile *Addend) {
+  return _InterlockedDecrement(Addend);
+}
+// CHECK: define{{.*}}i32 @test_InterlockedDecrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 seq_cst
+// CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
+// CHECK: ret i32 [[RESULT]]
+// CHECK: }
+
+__int64 test_InterlockedDecrement64(__int64 volatile *Addend) {
+  return _InterlockedDecrement64(Addend);
+}
+// CHECK: define{{.*}}i64 @test_InterlockedDecrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 seq_cst
+// CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
+// CHECK: ret i64 [[RESULT]]
+// CHECK: }
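
As the CompareExchange checks above show, the builtin returns the value
Destination held before the operation (the first member of the cmpxchg
result pair). A usage sketch built on that contract, with hypothetical names:

  /* try to claim a flag: _InterlockedCompareExchange returns the
     previous value, so 0 means this caller won the race */
  static int try_claim(long volatile *flag) {
    return _InterlockedCompareExchange(flag, 1, 0) == 0;
  }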

Modified: cfe/trunk/test/CodeGen/pr27892.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/pr27892.c?rev=281378&r1=281377&r2=281378&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/pr27892.c (original)
+++ cfe/trunk/test/CodeGen/pr27892.c Tue Sep 13 14:43:33 2016
@@ -7,7 +7,7 @@ long test1(long *p) {
 // CHECK:   %[[p_addr:.*]] = alloca i64*, align 8
 // CHECK:   store i64* %p, i64** %[[p_addr]], align 8
 // CHECK:   %[[p_load:.*]] = load i64*, i64** %[[p_addr]], align 8
-// CHECK:   %[[atomic_add:.*]] = atomicrmw volatile add i64* %[[p_load]], i64 1 seq_cst
+// CHECK:   %[[atomic_add:.*]] = atomicrmw add i64* %[[p_load]], i64 1 seq_cst
 // CHECK:   %[[res:.*]] = add i64 %[[atomic_add]], 1
 // CHECK:   ret i64 %[[res]]
 
@@ -18,6 +18,6 @@ long test2(long *p) {
 // CHECK:   %[[p_addr:.*]] = alloca i64*, align 8
 // CHECK:   store i64* %p, i64** %[[p_addr]], align 8
 // CHECK:   %[[p_load:.*]] = load i64*, i64** %[[p_addr]], align 8
-// CHECK:   %[[atomic_sub:.*]] = atomicrmw volatile sub i64* %[[p_load]], i64 1 seq_cst
+// CHECK:   %[[atomic_sub:.*]] = atomicrmw sub i64* %[[p_load]], i64 1 seq_cst
 // CHECK:   %[[res:.*]] = sub i64 %[[atomic_sub]], 1
 // CHECK:   ret i64 %[[res]]
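
These updates follow from dropping the setVolatile(true) calls in
CGBuiltin.cpp: the increment/decrement atomicrmw is no longer marked
volatile. Judging from the hunk headers, the unchanged test functions
presumably look like:

  /* assumed shape of the pr27892.c tests (not part of this diff) */
  long test1(long *p) { return _InterlockedIncrement(p); }
  long test2(long *p) { return _InterlockedDecrement(p); }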
