<div dir="ltr"><div><div>Ping.<br><br></div>This patch tries to fix the SEGV that occurs when atomic operations are lowered to libcalls (e.g. the implementation for armv5e.) The root cause is an incorrect bitcast from integers to pointers. This patch should fix the problem. Please have a look. Thanks.<br>
<br></div>Sincerely,<br>Logan<br></div><div class="gmail_extra"><br><br><div class="gmail_quote">On Tue, Mar 11, 2014 at 10:28 PM, Logan Chien <span dir="ltr"><<a href="mailto:tzuhsiang.chien@gmail.com" target="_blank">tzuhsiang.chien@gmail.com</a>></span> wrote:<br>
<blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"><div dir="ltr">Ping<br></div><div class="HOEnZb"><div class="h5"><div class="gmail_extra"><br><br><div class="gmail_quote">
On Mon, Mar 10, 2014 at 11:55 PM, Logan Chien <span dir="ltr"><<a href="mailto:tzuhsiang.chien@gmail.com" target="_blank">tzuhsiang.chien@gmail.com</a>></span> wrote:<br>
<blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"> Revised the test case so that it also passes in builds without assertions.<br>
<br>
<a href="http://llvm-reviews.chandlerc.com/D3006" target="_blank">http://llvm-reviews.chandlerc.com/D3006</a><br>
<br>
CHANGE SINCE LAST DIFF<br>
<a href="http://llvm-reviews.chandlerc.com/D3006?vs=7642&id=7700#toc" target="_blank">http://llvm-reviews.chandlerc.com/D3006?vs=7642&id=7700#toc</a><br>
<div><div><br>
Files:<br>
lib/CodeGen/CGAtomic.cpp<br>
test/CodeGen/atomic-ops-libcall.c<br>
<br>
Index: lib/CodeGen/CGAtomic.cpp<br>
===================================================================<br>
--- lib/CodeGen/CGAtomic.cpp<br>
+++ lib/CodeGen/CGAtomic.cpp<br>
@@ -476,6 +476,8 @@<br>
Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);<br>
<br>
std::string LibCallName;<br>
+ QualType LoweredMemTy =<br>
+ MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;<br>
QualType RetTy;<br>
bool HaveRetTy = false;<br>
switch (E->getOp()) {<br>
@@ -531,7 +533,7 @@<br>
case AtomicExpr::AO__c11_atomic_fetch_add:<br>
case AtomicExpr::AO__atomic_fetch_add:<br>
LibCallName = "__atomic_fetch_add";<br>
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,<br>
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,<br>
E->getExprLoc());<br>
break;<br>
// T __atomic_fetch_and_N(T *mem, T val, int order)<br>
@@ -552,7 +554,7 @@<br>
case AtomicExpr::AO__c11_atomic_fetch_sub:<br>
case AtomicExpr::AO__atomic_fetch_sub:<br>
LibCallName = "__atomic_fetch_sub";<br>
- AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,<br>
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,<br>
E->getExprLoc());<br>
break;<br>
// T __atomic_fetch_xor_N(T *mem, T val, int order)<br>
Index: test/CodeGen/atomic-ops-libcall.c<br>
===================================================================<br>
--- /dev/null<br>
+++ test/CodeGen/atomic-ops-libcall.c<br>
</div></div>@@ -0,0 +1,37 @@<br>
+// RUN: %clang_cc1 < %s -triple armv5e-none-linux-gnueabi -emit-llvm -O1 | FileCheck %s<br>
<div>+<br>
+enum memory_order {<br>
+ memory_order_relaxed, memory_order_consume, memory_order_acquire,<br>
+ memory_order_release, memory_order_acq_rel, memory_order_seq_cst<br>
+};<br>
+<br>
+int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) {<br>
+ // CHECK: test_c11_atomic_fetch_add_int_ptr<br>
</div>+ // CHECK: {{%[^ ]*}} = tail call i32* @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 12, i32 5)<br>
<div>+ return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);<br>
+}<br>
+<br>
+int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) {<br>
+ // CHECK: test_c11_atomic_fetch_sub_int_ptr<br>
</div>+ // CHECK: {{%[^ ]*}} = tail call i32* @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 20, i32 5)<br>
<div>+ return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);<br>
+}<br>
+<br>
+int test_c11_atomic_fetch_add_int(_Atomic(int) *p) {<br>
+ // CHECK: test_c11_atomic_fetch_add_int<br>
</div>+ // CHECK: {{%[^ ]*}} = tail call i32 bitcast (i32* (i8*, i32, i32)* @__atomic_fetch_add_4 to i32 (i8*, i32, i32)*)(i8* {{%[0-9]+}}, i32 3, i32 5)<br>
<div>+ return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);<br>
+}<br>
+<br>
+int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) {<br>
+ // CHECK: test_c11_atomic_fetch_sub_int<br>
</div>+ // CHECK: {{%[^ ]*}} = tail call i32 bitcast (i32* (i8*, i32, i32)* @__atomic_fetch_sub_4 to i32 (i8*, i32, i32)*)(i8* {{%[0-9]+}}, i32 5, i32 5)<br>
<div>+ return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);<br>
+}<br>
+<br>
+int *fp2a(int **p) {<br>
+ // CHECK: @fp2a<br>
</div>+ // CHECK: {{%[^ ]*}} = tail call i32* @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 4, i32 0)<br>
<div><div>+ // Note, the GNU builtins do not multiply by sizeof(T)!<br>
+ return __atomic_fetch_sub(p, 4, memory_order_relaxed);<br>
+}<br>
</div></div></blockquote></div><br></div>
</div></div></blockquote></div><br></div>