r244063 - Add missing atomic libcall support.

James Y Knight jyknight at google.com
Wed Aug 5 09:57:36 PDT 2015


Author: jyknight
Date: Wed Aug  5 11:57:36 2015
New Revision: 244063

URL: http://llvm.org/viewvc/llvm-project?rev=244063&view=rev
Log:
Add missing atomic libcall support.

Support for emitting libcalls for __atomic_fetch_nand and
__atomic_{add,sub,and,or,xor,nand}_fetch was missing; add it, and some
test cases.

Differential Revision: http://reviews.llvm.org/D10847
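
For readers unfamiliar with the two builtin families involved: __atomic_fetch_OP
returns the value the object held before the operation, while __atomic_OP_fetch
returns the value after it. A non-atomic sketch of the semantics (nand computes
~(old & val), per the GCC builtin documentation; the real builtins perform the
read-modify-write atomically):

    /* Sketch only: the actual builtins do these updates atomically. */
    int fetch_nand_semantics(int *p, int v) { /* __atomic_fetch_nand */
      int old = *p;
      *p = ~(old & v);
      return old;                             /* old value */
    }
    int nand_fetch_semantics(int *p, int v) { /* __atomic_nand_fetch */
      int result = ~(*p & v);
      *p = result;
      return result;                          /* new value */
    }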

Modified:
    cfe/trunk/docs/LanguageExtensions.rst
    cfe/trunk/lib/CodeGen/CGAtomic.cpp
    cfe/trunk/test/CodeGen/atomic-ops-libcall.c

Modified: cfe/trunk/docs/LanguageExtensions.rst
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/docs/LanguageExtensions.rst?rev=244063&r1=244062&r2=244063&view=diff
==============================================================================
--- cfe/trunk/docs/LanguageExtensions.rst (original)
+++ cfe/trunk/docs/LanguageExtensions.rst Wed Aug  5 11:57:36 2015
@@ -1715,6 +1715,9 @@ The macros ``__ATOMIC_RELAXED``, ``__ATO
 provided, with values corresponding to the enumerators of C11's
 ``memory_order`` enumeration.
 
+(Note that Clang additionally provides GCC-compatible ``__atomic_*``
+builtins.)
+
 Low-level ARM exclusive memory builtins
 ---------------------------------------
 

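A small usage sketch of what the documented extension covers: the
GCC-compatible builtins take the __ATOMIC_* macros, while Clang's __c11_*
builtins accept C11's memory_order enumerators, and the two encodings match.

    #include <stdatomic.h>

    int load_relaxed(int *p) {
      /* GCC-compatible builtin, using the __ATOMIC_* macro */
      return __atomic_load_n(p, __ATOMIC_RELAXED);
    }

    int inc_seq_cst(_Atomic int *p) {
      /* Clang's C11-style builtin, using the C11 enumerator */
      return __c11_atomic_fetch_add(p, 1, memory_order_seq_cst);
    }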
Modified: cfe/trunk/lib/CodeGen/CGAtomic.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGAtomic.cpp?rev=244063&r1=244062&r2=244063&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/CGAtomic.cpp (original)
+++ cfe/trunk/lib/CodeGen/CGAtomic.cpp Wed Aug  5 11:57:36 2015
@@ -699,7 +699,7 @@ RValue CodeGenFunction::EmitAtomicExpr(A
 
   switch (E->getOp()) {
   case AtomicExpr::AO__c11_atomic_init:
-    llvm_unreachable("Already handled!");
+    llvm_unreachable("Already handled above with EmitAtomicInit!");
 
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_load_n:
@@ -785,20 +785,43 @@ RValue CodeGenFunction::EmitAtomicExpr(A
   if (UseLibcall) {
     bool UseOptimizedLibcall = false;
     switch (E->getOp()) {
+    case AtomicExpr::AO__c11_atomic_init:
+      llvm_unreachable("Already handled above with EmitAtomicInit!");
+
     case AtomicExpr::AO__c11_atomic_fetch_add:
     case AtomicExpr::AO__atomic_fetch_add:
     case AtomicExpr::AO__c11_atomic_fetch_and:
     case AtomicExpr::AO__atomic_fetch_and:
     case AtomicExpr::AO__c11_atomic_fetch_or:
     case AtomicExpr::AO__atomic_fetch_or:
+    case AtomicExpr::AO__atomic_fetch_nand:
     case AtomicExpr::AO__c11_atomic_fetch_sub:
     case AtomicExpr::AO__atomic_fetch_sub:
     case AtomicExpr::AO__c11_atomic_fetch_xor:
     case AtomicExpr::AO__atomic_fetch_xor:
+    case AtomicExpr::AO__atomic_add_fetch:
+    case AtomicExpr::AO__atomic_and_fetch:
+    case AtomicExpr::AO__atomic_nand_fetch:
+    case AtomicExpr::AO__atomic_or_fetch:
+    case AtomicExpr::AO__atomic_sub_fetch:
+    case AtomicExpr::AO__atomic_xor_fetch:
       // For these, only library calls for certain sizes exist.
       UseOptimizedLibcall = true;
       break;
-    default:
+
+    case AtomicExpr::AO__c11_atomic_load:
+    case AtomicExpr::AO__c11_atomic_store:
+    case AtomicExpr::AO__c11_atomic_exchange:
+    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+    case AtomicExpr::AO__atomic_load_n:
+    case AtomicExpr::AO__atomic_load:
+    case AtomicExpr::AO__atomic_store_n:
+    case AtomicExpr::AO__atomic_store:
+    case AtomicExpr::AO__atomic_exchange_n:
+    case AtomicExpr::AO__atomic_exchange:
+    case AtomicExpr::AO__atomic_compare_exchange_n:
+    case AtomicExpr::AO__atomic_compare_exchange:
       // Only use optimized library calls for sizes for which they exist.
       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
         UseOptimizedLibcall = true;
@@ -820,6 +843,9 @@ RValue CodeGenFunction::EmitAtomicExpr(A
     QualType RetTy;
     bool HaveRetTy = false;
     switch (E->getOp()) {
+    case AtomicExpr::AO__c11_atomic_init:
+      llvm_unreachable("Already handled!");
+
     // There is only one libcall for compare and exchange, because there is no
     // optimisation benefit possible from a libcall version of a weak compare
     // and exchange.
@@ -903,7 +929,49 @@ RValue CodeGenFunction::EmitAtomicExpr(A
       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                         E->getExprLoc(), sizeChars);
       break;
-    default: return EmitUnsupportedRValue(E, "atomic library call");
+    // T __atomic_fetch_nand_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_fetch_nand:
+      LibCallName = "__atomic_fetch_nand";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+
+    // T __atomic_add_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_add_fetch:
+      LibCallName = "__atomic_add_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_and_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_and_fetch:
+      LibCallName = "__atomic_and_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_or_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_or_fetch:
+      LibCallName = "__atomic_or_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_sub_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_sub_fetch:
+      LibCallName = "__atomic_sub_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_xor_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_xor_fetch:
+      LibCallName = "__atomic_xor_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_nand_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_nand_fetch:
+      LibCallName = "__atomic_nand_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
     }
 
     // Optimized functions have the size in their name.

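To make the "optimized" vs. generic distinction above concrete: for operand
sizes 1, 2, 4, and 8 the compiler emits calls to size-suffixed entry points
that pass and return values directly, while other sizes go through generic
routines that take an explicit size and work through pointers. Representative
declarations, assuming the usual libatomic ABI (exact parameter types can
vary by platform):

    #include <stddef.h>

    /* Optimized, size-suffixed: value in, value out. */
    unsigned int __atomic_fetch_nand_4(void *mem, unsigned int val,
                                       int order);
    unsigned int __atomic_nand_fetch_4(void *mem, unsigned int val,
                                       int order);

    /* Generic: any size, operands passed through memory. */
    void __atomic_exchange(size_t size, void *mem, void *val, void *ret,
                           int order);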
Modified: cfe/trunk/test/CodeGen/atomic-ops-libcall.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/atomic-ops-libcall.c?rev=244063&r1=244062&r2=244063&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/atomic-ops-libcall.c (original)
+++ cfe/trunk/test/CodeGen/atomic-ops-libcall.c Wed Aug  5 11:57:36 2015
@@ -35,3 +35,75 @@ int *fp2a(int **p) {
   // Note, the GNU builtins do not multiply by sizeof(T)!
   return __atomic_fetch_sub(p, 4, memory_order_relaxed);
 }
+
+int test_atomic_fetch_add(int *p) {
+  // CHECK: test_atomic_fetch_add
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_add(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_sub(int *p) {
+  // CHECK: test_atomic_fetch_sub
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_and(int *p) {
+  // CHECK: test_atomic_fetch_and
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_and(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_or(int *p) {
+  // CHECK: test_atomic_fetch_or
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_or(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_xor(int *p) {
+  // CHECK: test_atomic_fetch_xor
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_fetch_nand(int *p) {
+  // CHECK: test_atomic_fetch_nand
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_add_fetch(int *p) {
+  // CHECK: test_atomic_add_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_add_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_add_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_sub_fetch(int *p) {
+  // CHECK: test_atomic_sub_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_sub_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_and_fetch(int *p) {
+  // CHECK: test_atomic_and_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_and_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_and_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_or_fetch(int *p) {
+  // CHECK: test_atomic_or_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_or_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_or_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_xor_fetch(int *p) {
+  // CHECK: test_atomic_xor_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_xor_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
+}
+
+int test_atomic_nand_fetch(int *p) {
+  // CHECK: test_atomic_nand_fetch
+  // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_nand_fetch_4(i8* {{%[0-9]+}}, i32 55, i32 5)
+  return __atomic_nand_fetch(p, 55, memory_order_seq_cst);
+}
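
Two details worth noting in the CHECK lines above: the _4 suffix reflects
sizeof(int) being 4 on the test target, and the trailing "i32 5" is the
memory-order argument, since __ATOMIC_SEQ_CST (and C11's
memory_order_seq_cst) encodes to 5. A quick compile-time check of that
encoding:

    #include <stdatomic.h>

    _Static_assert(__ATOMIC_SEQ_CST == 5, "seq_cst encodes to 5");
    _Static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST,
                   "C11 enumerator matches the builtin macro");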