r284172 - Add 64-bit MS _Interlocked functions as builtins again

Albert Gutowski via cfe-commits <cfe-commits at lists.llvm.org>
Thu Oct 13 15:35:08 PDT 2016


Author: agutowski
Date: Thu Oct 13 17:35:07 2016
New Revision: 284172

URL: http://llvm.org/viewvc/llvm-project?rev=284172&view=rev
Log:
Add 64-bit MS _Interlocked functions as builtins again

Summary: Previously, the global 64-bit versions of the _Interlocked functions broke the buildbots on i386, so this change adds them as builtins for x86-64 and ARM only. (Should they also be available on AArch64? I had trouble testing on AArch64, so I left it out.)

Reviewers: hans, majnemer, mstorsjo, rnk

Subscribers: cfe-commits, aemerson

Differential Revision: https://reviews.llvm.org/D25576
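
To illustrate the user-facing effect, a minimal sketch (hypothetical code, not part of the patch): with MS extensions enabled on x86-64 or 32-bit ARM, calls to the 64-bit _Interlocked family now lower directly to atomic instructions instead of going through header-only inline definitions.

  #include <intrin.h>

  __int64 counter64;

  __int64 bump64(void) {
    /* Atomically adds 10; returns the value counter64 held *before*
       the addition. Expected to lower to a single seq_cst atomicrmw. */
    return _InterlockedExchangeAdd64(&counter64, 10);
  }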

Modified:
    cfe/trunk/include/clang/Basic/BuiltinsARM.def
    cfe/trunk/include/clang/Basic/BuiltinsX86_64.def
    cfe/trunk/lib/CodeGen/CGBuiltin.cpp
    cfe/trunk/lib/Headers/intrin.h
    cfe/trunk/test/CodeGen/ms-intrinsics.c

Modified: cfe/trunk/include/clang/Basic/BuiltinsARM.def
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/BuiltinsARM.def?rev=284172&r1=284171&r2=284172&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsARM.def (original)
+++ cfe/trunk/include/clang/Basic/BuiltinsARM.def Thu Oct 13 17:35:07 2016
@@ -138,6 +138,15 @@ TARGET_HEADER_BUILTIN(_BitScanReverse, "
 TARGET_HEADER_BUILTIN(_BitScanForward64, "UcULi*ULLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 TARGET_HEADER_BUILTIN(_BitScanReverse64, "UcULi*ULLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 
+TARGET_HEADER_BUILTIN(_InterlockedAnd64,         "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64,   "LLiLLiD*",    "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64,    "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64,   "LLiLLiD*",    "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64,          "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64,         "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+
 #undef BUILTIN
 #undef LANGBUILTIN
 #undef TARGET_HEADER_BUILTIN
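
For readers new to the .def encoding (my decoding, based on the key in Builtins.def): the type string lists the return type first, "LLi" is long long, a trailing "D" makes the preceding type volatile, and "*" turns it into a pointer; in the attribute string, "n" means nothrow and "h" means the builtin must be declared by the named header. So the _InterlockedAnd64 entry above corresponds roughly to:

  /* "LLiLLiD*LLi" with attributes "nh" decodes to: */
  long long _InterlockedAnd64(long long volatile *_Value, long long _Mask);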

Modified: cfe/trunk/include/clang/Basic/BuiltinsX86_64.def
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/BuiltinsX86_64.def?rev=284172&r1=284171&r2=284172&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsX86_64.def (original)
+++ cfe/trunk/include/clang/Basic/BuiltinsX86_64.def Thu Oct 13 17:35:07 2016
@@ -32,6 +32,15 @@ TARGET_HEADER_BUILTIN(_umul128, "ULLiULL
 
 TARGET_HEADER_BUILTIN(__faststorefence, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 
+TARGET_HEADER_BUILTIN(_InterlockedAnd64,         "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64,   "LLiLLiD*",    "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64,    "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64,   "LLiLLiD*",    "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64,          "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64,         "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+
 TARGET_BUILTIN(__builtin_ia32_readeflags_u64, "ULLi", "n", "")
 TARGET_BUILTIN(__builtin_ia32_writeeflags_u64, "vULLi", "n", "")
 TARGET_BUILTIN(__builtin_ia32_cvtss2si64, "LLiV4f", "", "sse")
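
Note the contrast with the plain TARGET_BUILTIN entries nearby: a TARGET_HEADER_BUILTIN is only recognized as a builtin when its declaration comes from the named header, so a translation unit that never includes intrin.h can define its own function of the same name. A sketch of that property (hypothetical code, my understanding of the mechanism):

  /* Without #include <intrin.h>, this is an ordinary user function,
     not the builtin (deliberately non-atomic stand-in): */
  long long _InterlockedAnd64(long long volatile *v, long long m) {
    long long old = *v;
    *v = old & m;
    return old;
  }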

Modified: cfe/trunk/lib/CodeGen/CGBuiltin.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGBuiltin.cpp?rev=284172&r1=284171&r2=284172&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGBuiltin.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGBuiltin.cpp Thu Oct 13 17:35:07 2016
@@ -463,6 +463,107 @@ CodeGenFunction::emitBuiltinObjectSize(c
   return Builder.CreateCall(F, {EmitScalarExpr(E), CI});
 }
 
+// Many of the MSVC builtins are on both x64 and ARM; to avoid repeating code, we
+// handle them here.
+enum class CodeGenFunction::MSVCIntrin {
+  _BitScanForward,
+  _BitScanReverse,
+  _InterlockedAnd,
+  _InterlockedDecrement,
+  _InterlockedExchange,
+  _InterlockedExchangeAdd,
+  _InterlockedExchangeSub,
+  _InterlockedIncrement,
+  _InterlockedOr,
+  _InterlockedXor,
+};
+
+Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
+  const CallExpr *E) {
+  switch (BuiltinID) {
+  case MSVCIntrin::_BitScanForward:
+  case MSVCIntrin::_BitScanReverse: {
+    Value *ArgValue = EmitScalarExpr(E->getArg(1));
+
+    llvm::Type *ArgType = ArgValue->getType();
+    llvm::Type *IndexType =
+      EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
+    llvm::Type *ResultType = ConvertType(E->getType());
+
+    Value *ArgZero = llvm::Constant::getNullValue(ArgType);
+    Value *ResZero = llvm::Constant::getNullValue(ResultType);
+    Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
+
+    BasicBlock *Begin = Builder.GetInsertBlock();
+    BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
+    Builder.SetInsertPoint(End);
+    PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
+
+    Builder.SetInsertPoint(Begin);
+    Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
+    BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
+    Builder.CreateCondBr(IsZero, End, NotZero);
+    Result->addIncoming(ResZero, Begin);
+
+    Builder.SetInsertPoint(NotZero);
+    Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));
+
+    if (BuiltinID == MSVCIntrin::_BitScanForward) {
+      Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
+      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
+      Builder.CreateStore(ZeroCount, IndexAddress, false);
+    } else {
+      unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
+      Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
+
+      Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
+      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
+      Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
+      Builder.CreateStore(Index, IndexAddress, false);
+    }
+    Builder.CreateBr(End);
+    Result->addIncoming(ResOne, NotZero);
+
+    Builder.SetInsertPoint(End);
+    return Result;
+  }
+  case MSVCIntrin::_InterlockedAnd:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
+  case MSVCIntrin::_InterlockedExchange:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
+  case MSVCIntrin::_InterlockedExchangeAdd:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
+  case MSVCIntrin::_InterlockedExchangeSub:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
+  case MSVCIntrin::_InterlockedOr:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
+  case MSVCIntrin::_InterlockedXor:
+    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
+
+  case MSVCIntrin::_InterlockedDecrement: {
+    llvm::Type *IntTy = ConvertType(E->getType());
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+      AtomicRMWInst::Sub,
+      EmitScalarExpr(E->getArg(0)),
+      ConstantInt::get(IntTy, 1),
+      llvm::AtomicOrdering::SequentiallyConsistent);
+    return Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1));
+  }
+  case MSVCIntrin::_InterlockedIncrement: {
+    llvm::Type *IntTy = ConvertType(E->getType());
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+      AtomicRMWInst::Add,
+      EmitScalarExpr(E->getArg(0)),
+      ConstantInt::get(IntTy, 1),
+      llvm::AtomicOrdering::SequentiallyConsistent);
+    return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1));
+  }
+  }
+  llvm_unreachable("Incorrect MSVC intrinsic!");
+}
+
 RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                         unsigned BuiltinID, const CallExpr *E,
                                         ReturnValueSlot ReturnValue) {
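
The CreateAdd/CreateSub that follows each atomicrmw above exists because atomicrmw yields the value memory held before the operation, while MSVC's _InterlockedIncrement/_InterlockedDecrement are documented to return the new value. The same pattern in portable form (a sketch using the GCC/Clang __atomic builtins, not code from this patch):

  long long increment64(long long volatile *p) {
    /* __atomic_fetch_add returns the old value; adding 1 yields the
       new one, mirroring the CreateAtomicRMW + CreateAdd pair above. */
    return __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST) + 1;
  }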
@@ -1978,7 +2079,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(
   case Builtin::BI_InterlockedExchange16:
   case Builtin::BI_InterlockedExchange:
   case Builtin::BI_InterlockedExchangePointer:
-    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
+    return RValue::get(
+        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
   case Builtin::BI_InterlockedCompareExchangePointer: {
     llvm::Type *RTy;
     llvm::IntegerType *IntType =
@@ -2020,45 +2122,35 @@ RValue CodeGenFunction::EmitBuiltinExpr(
       return RValue::get(Builder.CreateExtractValue(CXI, 0));
   }
   case Builtin::BI_InterlockedIncrement16:
-  case Builtin::BI_InterlockedIncrement: {
-    llvm::Type *IntTy = ConvertType(E->getType());
-    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
-      AtomicRMWInst::Add,
-      EmitScalarExpr(E->getArg(0)),
-      ConstantInt::get(IntTy, 1),
-      llvm::AtomicOrdering::SequentiallyConsistent);
-    return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1)));
-  }
+  case Builtin::BI_InterlockedIncrement:
+    return RValue::get(
+        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
   case Builtin::BI_InterlockedDecrement16:
-  case Builtin::BI_InterlockedDecrement: {
-    llvm::Type *IntTy = ConvertType(E->getType());
-    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
-      AtomicRMWInst::Sub,
-      EmitScalarExpr(E->getArg(0)),
-      ConstantInt::get(IntTy, 1),
-      llvm::AtomicOrdering::SequentiallyConsistent);
-    return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1)));
-  }
+  case Builtin::BI_InterlockedDecrement:
+    return RValue::get(
+        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
   case Builtin::BI_InterlockedAnd8:
   case Builtin::BI_InterlockedAnd16:
   case Builtin::BI_InterlockedAnd:
-    return EmitBinaryAtomic(*this, AtomicRMWInst::And, E);
+    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
   case Builtin::BI_InterlockedExchangeAdd8:
   case Builtin::BI_InterlockedExchangeAdd16:
   case Builtin::BI_InterlockedExchangeAdd:
-    return EmitBinaryAtomic(*this, AtomicRMWInst::Add, E);
+    return RValue::get(
+        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
   case Builtin::BI_InterlockedExchangeSub8:
   case Builtin::BI_InterlockedExchangeSub16:
   case Builtin::BI_InterlockedExchangeSub:
-    return EmitBinaryAtomic(*this, AtomicRMWInst::Sub, E);
+    return RValue::get(
+        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
   case Builtin::BI_InterlockedOr8:
   case Builtin::BI_InterlockedOr16:
   case Builtin::BI_InterlockedOr:
-    return EmitBinaryAtomic(*this, AtomicRMWInst::Or, E);
+    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
   case Builtin::BI_InterlockedXor8:
   case Builtin::BI_InterlockedXor16:
   case Builtin::BI_InterlockedXor:
-    return EmitBinaryAtomic(*this, AtomicRMWInst::Xor, E);
+    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
   case Builtin::BI__readfsdword: {
     llvm::Type *IntTy = ConvertType(E->getType());
     Value *IntToPtr =
@@ -2641,68 +2733,6 @@ static Value *EmitTargetArchBuiltinExpr(
   }
 }
 
-// Many of MSVC builtins are on both x64 and ARM; to avoid repeating code, we
-// handle them here.
-enum class CodeGenFunction::MSVCIntrin {
-  _BitScanForward,
-  _BitScanReverse
-};
-
-Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
-                                            const CallExpr *E) {
-  switch (BuiltinID) {
-  case MSVCIntrin::_BitScanForward:
-  case MSVCIntrin::_BitScanReverse: {
-    Value *ArgValue = EmitScalarExpr(E->getArg(1));
-
-    llvm::Type *ArgType = ArgValue->getType();
-    llvm::Type *IndexType =
-        EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
-    llvm::Type *ResultType = ConvertType(E->getType());
-
-    Value *ArgZero = llvm::Constant::getNullValue(ArgType);
-    Value *ResZero = llvm::Constant::getNullValue(ResultType);
-    Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
-
-    BasicBlock *Begin = Builder.GetInsertBlock();
-    BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
-    Builder.SetInsertPoint(End);
-    PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
-
-    Builder.SetInsertPoint(Begin);
-    Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
-    BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
-    Builder.CreateCondBr(IsZero, End, NotZero);
-    Result->addIncoming(ResZero, Begin);
-
-    Builder.SetInsertPoint(NotZero);
-    Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));
-
-    if (BuiltinID == MSVCIntrin::_BitScanForward) {
-      Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
-      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
-      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
-      Builder.CreateStore(ZeroCount, IndexAddress, false);
-    } else {
-      unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
-      Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
-
-      Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
-      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
-      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
-      Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
-      Builder.CreateStore(Index, IndexAddress, false);
-    }
-    Builder.CreateBr(End);
-    Result->addIncoming(ResOne, NotZero);
-
-    Builder.SetInsertPoint(End);
-    return Result;
-  }
-  }
-  llvm_unreachable("Incorrect MSVC intrinsic!");
-}
-
 Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
   if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
@@ -4633,6 +4663,23 @@ Value *CodeGenFunction::EmitARMBuiltinEx
   case ARM::BI_BitScanReverse:
   case ARM::BI_BitScanReverse64:
     return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
+
+  case ARM::BI_InterlockedAnd64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
+  case ARM::BI_InterlockedExchange64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
+  case ARM::BI_InterlockedExchangeAdd64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
+  case ARM::BI_InterlockedExchangeSub64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
+  case ARM::BI_InterlockedOr64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
+  case ARM::BI_InterlockedXor64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
+  case ARM::BI_InterlockedDecrement64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
+  case ARM::BI_InterlockedIncrement64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
   }
 
   // Get the last argument, which specifies the vector type.
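
Because EmitMSVCBuiltinExpr derives all of its types from the CallExpr, one MSVCIntrin case covers every operand width; the 64-bit cases above differ from the 8/16/32-bit ones only in which builtin ID reaches the switch. A sketch of the expected lowering across widths (seq_cst throughout; hypothetical demo function):

  #include <intrin.h>
  char a8; short a16; long a32; __int64 a64;
  void widths(void) {
    _InterlockedAnd8(&a8, 1);   /* atomicrmw and i8  */
    _InterlockedAnd16(&a16, 1); /* atomicrmw and i16 */
    _InterlockedAnd(&a32, 1);   /* atomicrmw and i32 */
    _InterlockedAnd64(&a64, 1); /* atomicrmw and i64, new here */
  }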
@@ -7701,6 +7748,24 @@ Value *CodeGenFunction::EmitX86BuiltinEx
   case X86::BI_BitScanReverse:
   case X86::BI_BitScanReverse64:
     return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
+
+  case X86::BI_InterlockedAnd64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
+  case X86::BI_InterlockedExchange64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
+  case X86::BI_InterlockedExchangeAdd64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
+  case X86::BI_InterlockedExchangeSub64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
+  case X86::BI_InterlockedOr64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
+  case X86::BI_InterlockedXor64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
+  case X86::BI_InterlockedDecrement64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
+  case X86::BI_InterlockedIncrement64:
+    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
+
   case X86::BI_AddressOfReturnAddress: {
     Value *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
     return Builder.CreateCall(F);
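
As on ARM, the x86-64 cases simply forward to the shared emitter; MakeBinaryAtomicValue gives the exchange-style builtins their documented return-the-previous-value behavior. In portable terms (a sketch, not code from this patch):

  long long exchange64(long long volatile *t, long long v) {
    /* Like atomicrmw xchg: stores v and returns the prior value. */
    return __atomic_exchange_n(t, v, __ATOMIC_SEQ_CST);
  }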

Modified: cfe/trunk/lib/Headers/intrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/intrin.h?rev=284172&r1=284171&r2=284172&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/intrin.h (original)
+++ cfe/trunk/lib/Headers/intrin.h Thu Oct 13 17:35:07 2016
@@ -387,25 +387,13 @@ void *_InterlockedCompareExchangePointer
                                          void *_Exchange, void *_Comparand);
 void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
                                             void *_Exchange, void *_Comparand);
-static __inline__
-__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
-static __inline__
-__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
-static __inline__
-__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
 void *_InterlockedExchangePointer(void *volatile *_Target, void *_Value);
-static __inline__
-__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
 long _InterlockedOr_np(long volatile *_Value, long _Mask);
 short _InterlockedOr16_np(short volatile *_Value, short _Mask);
-static __inline__
-__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
 __int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
 char _InterlockedOr8_np(char volatile *_Value, char _Mask);
 long _InterlockedXor_np(long volatile *_Value, long _Mask);
 short _InterlockedXor16_np(short volatile *_Value, short _Mask);
-static __inline__
-__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
 __int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
 char _InterlockedXor8_np(char volatile *_Value, char _Mask);
 unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
@@ -428,6 +416,27 @@ unsigned __int64 _umul128(unsigned __int
 
 #endif /* __x86_64__ */
 
+#if defined(__x86_64__) || defined(__arm__)
+
+static __inline__
+__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
+static __inline__
+__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
+static __inline__
+__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
+static __inline__
+__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value);
+static __inline__
+__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
+static __inline__
+__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
+static __inline__
+__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
+static __inline__
+__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
+
+#endif
+
 /*----------------------------------------------------------------------------*\
 |* Bit Counting and Testing
 \*----------------------------------------------------------------------------*/
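
One consequence worth flagging: the inline bodies deleted further down were also compiled for __aarch64__, and the new declaration block above excludes it, so the plain (seq_cst) 64-bit _Interlocked functions disappear on AArch64 until the builtins are added there (the open question in the summary). A stopgap a user could write with MS extensions enabled (hypothetical, not part of the patch):

  #if defined(__aarch64__)
  static inline __int64
  my_InterlockedExchangeAdd64(__int64 volatile *p, __int64 v) {
    return __atomic_fetch_add(p, v, __ATOMIC_SEQ_CST);
  }
  #endif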
@@ -545,14 +554,6 @@ static __inline__ long __DEFAULT_FN_ATTR
 _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value) {
   return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
-  return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value) {
   return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
@@ -567,15 +568,6 @@ _InterlockedExchangeAdd64_rel(__int64 vo
 }
 #endif
 /*----------------------------------------------------------------------------*\
-|* Interlocked Exchange Sub
-\*----------------------------------------------------------------------------*/
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
-  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
-}
-#endif
-/*----------------------------------------------------------------------------*\
 |* Interlocked Increment
 \*----------------------------------------------------------------------------*/
 #if defined(__arm__) || defined(__aarch64__)
@@ -603,14 +595,6 @@ static __inline__ long __DEFAULT_FN_ATTR
 _InterlockedIncrement_rel(long volatile *_Value) {
   return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64(__int64 volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedIncrement64_acq(__int64 volatile *_Value) {
   return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
@@ -652,14 +636,6 @@ static __inline__ long __DEFAULT_FN_ATTR
 _InterlockedDecrement_rel(long volatile *_Value) {
   return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedDecrement64(__int64 volatile *_Value) {
-  return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedDecrement64_acq(__int64 volatile *_Value) {
   return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE);
@@ -713,14 +689,6 @@ static __inline__ long __DEFAULT_FN_ATTR
 _InterlockedAnd_rel(long volatile *_Value, long _Mask) {
   return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
-  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask) {
   return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
@@ -774,14 +742,6 @@ static __inline__ long __DEFAULT_FN_ATTR
 _InterlockedOr_rel(long volatile *_Value, long _Mask) {
   return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
-  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask) {
   return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
@@ -835,14 +795,6 @@ static __inline__ long __DEFAULT_FN_ATTR
 _InterlockedXor_rel(long volatile *_Value, long _Mask) {
   return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
-  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask) {
   return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
@@ -905,15 +857,6 @@ _InterlockedExchange_rel(long volatile *
   __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
   return _Value;
 }
-#endif
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
-  __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
-  return _Value;
-}
-#endif
-#if defined(__arm__) || defined(__aarch64__)
 static __inline__ __int64 __DEFAULT_FN_ATTRS
 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value) {
   __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
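
On the surviving _acq/_rel variants: the generic __atomic_exchange(ptr, val, ret, order) form used here reads the new value from *val and writes the old one back through *ret, which is why _Value serves as both input and output. The _n form expresses the same operation more directly (an equivalent sketch):

  static __inline__ __int64
  exchange64_acq(__int64 volatile *_Target, __int64 _Value) {
    return __atomic_exchange_n(_Target, _Value, __ATOMIC_ACQUIRE);
  }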

Modified: cfe/trunk/test/CodeGen/ms-intrinsics.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/ms-intrinsics.c?rev=284172&r1=284171&r2=284172&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/ms-intrinsics.c (original)
+++ cfe/trunk/test/CodeGen/ms-intrinsics.c Thu Oct 13 17:35:07 2016
@@ -334,3 +334,72 @@ long test_InterlockedDecrement(long vola
 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
 // CHECK: ret i32 [[RESULT]]
 // CHECK: }
+
+#if defined(__x86_64__) || defined(__arm__)
+__int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedExchange64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedExchange64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedExchangeAdd64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedExchangeAdd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedExchangeSub64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedExchangeSub64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw sub i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedOr64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedOr64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedXor64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedXor64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) {
+  return _InterlockedAnd64(value, mask);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedAnd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-X64:   [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask seq_cst
+// CHECK-ARM-X64:   ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedIncrement64(__int64 volatile *Addend) {
+  return _InterlockedIncrement64(Addend);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedIncrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-X64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 seq_cst
+// CHECK-ARM-X64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
+// CHECK-ARM-X64: ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+__int64 test_InterlockedDecrement64(__int64 volatile *Addend) {
+  return _InterlockedDecrement64(Addend);
+}
+// CHECK-ARM-X64: define{{.*}}i64 @test_InterlockedDecrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-X64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 seq_cst
+// CHECK-ARM-X64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
+// CHECK-ARM-X64: ret i64 [[RESULT]]
+// CHECK-ARM-X64: }
+
+#endif
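
The CHECK-ARM-X64 prefix implies the file's RUN lines (outside this hunk) invoke FileCheck with that prefix for both an x86-64 and an ARM Windows triple; something along these lines (illustrative only, the exact triples and flags live in the full file):

  // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility \
  // RUN:   -triple x86_64--windows -emit-llvm %s -o - \
  // RUN:   | FileCheck --check-prefix=CHECK-ARM-X64 %s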
