r201901 - Reapply 201734 but with appropriate gcc compatibility

Warren Hunt whunt at google.com
Fri Feb 21 15:08:53 PST 2014


Author: whunt
Date: Fri Feb 21 17:08:53 2014
New Revision: 201901

URL: http://llvm.org/viewvc/llvm-project?rev=201901&view=rev
Log:
Reapply 201734 but with appropriate gcc compatibility
Because GCC's definition of _mm_prefetch incorrectly accepts anything that 
can be cast to void*, existing code has come to rely on that behavior.  The 
previous patch, which made _mm_prefetch actually take a const char *, broke 
compatibility with that code.  This update keeps the macro definition of 
_mm_prefetch, with its (void*) cast, when _MSC_VER is not defined, so only 
the Microsoft-compatibility path sees the stricter const char * builtin.

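To illustrate the compatibility issue (a hypothetical caller, not part of this
patch): code in this style builds against GCC's headers only because the macro
casts its argument to (void *); with a strict const char * prototype it would
need an explicit cast, and is ill-formed in C++ without one.

    #include <xmmintrin.h>

    static const int table[256];

    void warm_cache(void) {
      /* table decays to const int *, not const char *; the macro form accepts
         it only because of the (void *) cast in the expansion. */
      _mm_prefetch(table, _MM_HINT_T0);
    }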

Modified:
    cfe/trunk/include/clang/Basic/Builtins.def
    cfe/trunk/include/clang/Basic/BuiltinsX86.def
    cfe/trunk/include/clang/Sema/Sema.h
    cfe/trunk/lib/CodeGen/CGBuiltin.cpp
    cfe/trunk/lib/Headers/Intrin.h
    cfe/trunk/lib/Headers/xmmintrin.h
    cfe/trunk/lib/Sema/SemaChecking.cpp

Modified: cfe/trunk/include/clang/Basic/Builtins.def
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/Builtins.def?rev=201901&r1=201900&r2=201901&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/Builtins.def (original)
+++ cfe/trunk/include/clang/Basic/Builtins.def Fri Feb 21 17:08:53 2014
@@ -679,6 +679,10 @@ LANGBUILTIN(_alloca,      "v*z", "n", AL
 LANGBUILTIN(__assume,     "vb",  "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(__noop,       "v.",  "n", ALL_MS_LANGUAGES)
 LANGBUILTIN(__debugbreak, "v",   "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedCompareExchange, "LiLiD*LiLi", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedIncrement, "LiLiD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedDecrement, "LiLiD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeAdd, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
 
 // C99 library functions
 // C99 stdlib.h

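For reference, decoding the Builtins.def type strings above ("Li" is long, a
"D" suffix adds volatile, "*" forms a pointer to the type built so far, and the
first type is the return type), the new entries correspond roughly to the
familiar MSVC prototypes; this is a reading of the encoding, not text from the
patch:

    long _InterlockedCompareExchange(long volatile *_Destination,
                                     long _Exchange, long _Comparand);
    long _InterlockedIncrement(long volatile *_Value);
    long _InterlockedDecrement(long volatile *_Value);
    long _InterlockedExchangeAdd(long volatile *_Addend, long _Value);
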
Modified: cfe/trunk/include/clang/Basic/BuiltinsX86.def
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Basic/BuiltinsX86.def?rev=201901&r1=201900&r2=201901&view=diff
==============================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsX86.def (original)
+++ cfe/trunk/include/clang/Basic/BuiltinsX86.def Fri Feb 21 17:08:53 2014
@@ -59,6 +59,7 @@ BUILTIN(__builtin_ia32_pswapdsi, "V2iV2i
 // All MMX instructions will be generated via builtins. Any MMX vector
 // types (<1 x i64>, <2 x i32>, etc.) that aren't used by these builtins will be
 // expanded by the back-end.
+BUILTIN(_mm_prefetch, "vcC*i", "nc")
 BUILTIN(__builtin_ia32_emms, "v", "")
 BUILTIN(__builtin_ia32_paddb, "V8cV8cV8c", "")
 BUILTIN(__builtin_ia32_paddw, "V4sV4sV4s", "")

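Decoded the same way, "vcC*i" declares the builtin below; the "nc" attributes
mark it nothrow and const (no visible side effects). Again, this is a reading
of the encoding rather than patch text:

    void _mm_prefetch(char const *, int);
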
Modified: cfe/trunk/include/clang/Sema/Sema.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/Sema/Sema.h?rev=201901&r1=201900&r2=201901&view=diff
==============================================================================
--- cfe/trunk/include/clang/Sema/Sema.h (original)
+++ cfe/trunk/include/clang/Sema/Sema.h Fri Feb 21 17:08:53 2014
@@ -7897,6 +7897,7 @@ private:
   bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
   bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
   bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+  bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
 
   bool SemaBuiltinVAStart(CallExpr *TheCall);
   bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
@@ -7911,6 +7912,7 @@ public:
 
 private:
   bool SemaBuiltinPrefetch(CallExpr *TheCall);
+  bool SemaBuiltinMMPrefetch(CallExpr *TheCall);
   bool SemaBuiltinObjectSize(CallExpr *TheCall);
   bool SemaBuiltinLongjmp(CallExpr *TheCall);
   ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);

Modified: cfe/trunk/lib/CodeGen/CGBuiltin.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGBuiltin.cpp?rev=201901&r1=201900&r2=201901&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGBuiltin.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGBuiltin.cpp Fri Feb 21 17:08:53 2014
@@ -1500,6 +1500,42 @@ RValue CodeGenFunction::EmitBuiltinExpr(
     return RValue::get(EmitLValue(E->getArg(0)).getAddress());
   case Builtin::BI__noop:
     return RValue::get(0);
+  case Builtin::BI_InterlockedCompareExchange: {
+    AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
+        EmitScalarExpr(E->getArg(0)),
+        EmitScalarExpr(E->getArg(2)),
+        EmitScalarExpr(E->getArg(1)),
+        SequentiallyConsistent);
+      CXI->setVolatile(true);
+      return RValue::get(CXI);
+  }
+  case Builtin::BI_InterlockedIncrement: {
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+      AtomicRMWInst::Add,
+      EmitScalarExpr(E->getArg(0)),
+      ConstantInt::get(Int32Ty, 1),
+      llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
+    return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(Int32Ty, 1)));
+  }
+  case Builtin::BI_InterlockedDecrement: {
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+      AtomicRMWInst::Sub,
+      EmitScalarExpr(E->getArg(0)),
+      ConstantInt::get(Int32Ty, 1),
+      llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
+    return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(Int32Ty, 1)));
+  }
+  case Builtin::BI_InterlockedExchangeAdd: {
+    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+      AtomicRMWInst::Add,
+      EmitScalarExpr(E->getArg(0)),
+      EmitScalarExpr(E->getArg(1)),
+      llvm::SequentiallyConsistent);
+    RMWI->setVolatile(true);
+    return RValue::get(RMWI);
+  }
   }
 
   // If this is an alias for a lib function (e.g. __builtin_sin), emit
@@ -4437,6 +4473,14 @@ Value *CodeGenFunction::EmitX86BuiltinEx
 
   switch (BuiltinID) {
   default: return 0;
+  case X86::BI_mm_prefetch: {
+    Value *Address = EmitScalarExpr(E->getArg(0));
+    Value *RW = ConstantInt::get(Int32Ty, 0);
+    Value *Locality = EmitScalarExpr(E->getArg(1));
+    Value *Data = ConstantInt::get(Int32Ty, 1);
+    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+    return Builder.CreateCall4(F, Address, RW, Locality, Data);
+  }
   case X86::BI__builtin_ia32_vec_init_v8qi:
   case X86::BI__builtin_ia32_vec_init_v4hi:
   case X86::BI__builtin_ia32_vec_init_v2si:

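A brief usage sketch (hypothetical test code, not from the patch) of the
semantics the lowering above implements: the atomicrmw returns the value that
was in memory before the operation, so _InterlockedIncrement and
_InterlockedDecrement add or subtract one more to return the updated value,
while _InterlockedExchangeAdd returns the original value directly.

    /* Compiled in Microsoft mode (e.g. clang-cl), where these are builtins. */
    long counter = 41;

    void example(void) {
      long a = _InterlockedIncrement(&counter);      /* counter == 42, a == 42 */
      long b = _InterlockedExchangeAdd(&counter, 5); /* returns 42, counter 47 */
      long c = _InterlockedCompareExchange(&counter, 100, 47);
      /* counter was 47, so it is now 100 and c holds the old value, 47. */
      (void)a; (void)b; (void)c;
    }
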
Modified: cfe/trunk/lib/Headers/Intrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/Intrin.h?rev=201901&r1=201900&r2=201901&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/Intrin.h (original)
+++ cfe/trunk/lib/Headers/Intrin.h Fri Feb 21 17:08:53 2014
@@ -623,10 +623,6 @@ static __inline__ short __attribute__((_
 _InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
   return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedExchangeAdd(long volatile *_Addend, long _Value) {
-  return __atomic_add_fetch(_Addend, _Value, 0) - _Value;
-}
 #ifdef __x86_64__
 static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
@@ -661,10 +657,6 @@ static __inline__ short __attribute__((_
 _InterlockedIncrement16(short volatile *_Value) {
   return __atomic_add_fetch(_Value, 1, 0);
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedIncrement(long volatile *_Value) {
-  return __atomic_add_fetch(_Value, 1, 0);
-}
 #ifdef __x86_64__
 static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
 _InterlockedIncrement64(__int64 volatile *_Value) {
@@ -678,10 +670,6 @@ static __inline__ short __attribute__((_
 _InterlockedDecrement16(short volatile *_Value) {
   return __atomic_sub_fetch(_Value, 1, 0);
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedDecrement(long volatile *_Value) {
-  return __atomic_sub_fetch(_Value, 1, 0);
-}
 #ifdef __x86_64__
 static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
 _InterlockedDecrement64(__int64 volatile *_Value) {
@@ -791,12 +779,6 @@ _InterlockedCompareExchange16(short vola
   __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
   return _Comparand;
 }
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedCompareExchange(long volatile *_Destination,
-                            long _Exchange, long _Comparand) {
-  __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
-  return _Comparand;
-}
 #ifdef __x86_64__
 static __inline__ void *__attribute__((__always_inline__, __nodebug__))
 _InterlockedCompareExchangePointer(void *volatile *_Destination,

Modified: cfe/trunk/lib/Headers/xmmintrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/xmmintrin.h?rev=201901&r1=201900&r2=201901&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/xmmintrin.h (original)
+++ cfe/trunk/lib/Headers/xmmintrin.h Fri Feb 21 17:08:53 2014
@@ -672,10 +672,9 @@ _mm_storer_ps(float *__p, __m128 __a)
 #define _MM_HINT_T2 1
 #define _MM_HINT_NTA 0
 
-/* FIXME: We have to #define this because "sel" must be a constant integer, and
-   Sema doesn't do any form of constant propagation yet. */
-
+#ifndef _MSC_VER
 #define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
+#endif
 
 static __inline__ void __attribute__((__always_inline__, __nodebug__))
 _mm_stream_pi(__m64 *__p, __m64 __a)

Modified: cfe/trunk/lib/Sema/SemaChecking.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Sema/SemaChecking.cpp?rev=201901&r1=201900&r2=201901&view=diff
==============================================================================
--- cfe/trunk/lib/Sema/SemaChecking.cpp (original)
+++ cfe/trunk/lib/Sema/SemaChecking.cpp Fri Feb 21 17:08:53 2014
@@ -317,6 +317,11 @@ Sema::CheckBuiltinFunctionCall(unsigned
         if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
           return ExprError();
         break;
+      case llvm::Triple::x86:
+      case llvm::Triple::x86_64:
+        if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
+          return ExprError();
+        break;
       default:
         break;
     }
@@ -655,6 +660,15 @@ bool Sema::CheckMipsBuiltinFunctionCall(
   return false;
 }
 
+bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+  switch (BuiltinID) {
+  case X86::BI_mm_prefetch:
+    return SemaBuiltinMMPrefetch(TheCall);
+    break;
+  }
+  return false;
+}
+
 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo
 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
 /// Returns true when the format fits the function and the FormatStringInfo has
@@ -1920,6 +1934,26 @@ bool Sema::SemaBuiltinPrefetch(CallExpr
 
   return false;
 }
+
+/// SemaBuiltinMMPrefetch - Handle _mm_prefetch.
+// This is declared to take (const char*, int)
+bool Sema::SemaBuiltinMMPrefetch(CallExpr *TheCall) {
+  Expr *Arg = TheCall->getArg(1);
+
+  // We can't check the value of a dependent argument.
+  if (Arg->isTypeDependent() || Arg->isValueDependent())
+    return false;
+
+  llvm::APSInt Result;
+  if (SemaBuiltinConstantArg(TheCall, 1, Result))
+    return true;
+
+  if (Result.getLimitedValue() > 3)
+    return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+        << "0" << "3" << Arg->getSourceRange();
+
+  return false;
+}
 
 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
 /// TheCall is a constant expression.

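As a usage note (hypothetical example, not from the patch): with _mm_prefetch
now a real builtin in Microsoft mode, the check above rejects a locality hint
that is not an integer constant in the range [0, 3] at compile time, instead of
silently accepting it.

    #include <xmmintrin.h>

    void bad_hint(const char *p) {
      _mm_prefetch(p, 8); /* expected to be diagnosed: hint must be 0..3 */
    }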



