[llvm-commits] [llvm] r168684 - in /llvm/trunk: lib/Transforms/Instrumentation/ThreadSanitizer.cpp test/Instrumentation/ThreadSanitizer/atomic.ll

Dmitry Vyukov dvyukov at google.com
Tue Nov 27 00:09:25 PST 2012


Author: dvyukov
Date: Tue Nov 27 02:09:25 2012
New Revision: 168684

URL: http://llvm.org/viewvc/llvm-project?rev=168684&view=rev
Log:
tsan: instrument atomic nand operation
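
For context, the pass forwards each atomicrmw instruction to a matching
function in the tsan runtime. Below is a minimal sketch of the entry points
this commit starts emitting calls to, with parameter and return types
inferred from the IR calls in the test diff further down; the actual
compiler-rt headers may use their own typedefs, so treat the exact types
as an assumption:

    // Third argument is the memory ordering, encoded as an i32
    // (see the note after the ThreadSanitizer.cpp hunk below).
    #include <cstdint>
    extern "C" {
    uint8_t  __tsan_atomic8_fetch_nand (volatile uint8_t  *a, uint8_t  v, int mo);
    uint16_t __tsan_atomic16_fetch_nand(volatile uint16_t *a, uint16_t v, int mo);
    uint32_t __tsan_atomic32_fetch_nand(volatile uint32_t *a, uint32_t v, int mo);
    uint64_t __tsan_atomic64_fetch_nand(volatile uint64_t *a, uint64_t v, int mo);
    // The test also exercises a 128-bit variant,
    // __tsan_atomic128_fetch_nand, presumably built on __int128.
    }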

Modified:
    llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
    llvm/trunk/test/Instrumentation/ThreadSanitizer/atomic.ll

Modified: llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp?rev=168684&r1=168683&r2=168684&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp Tue Nov 27 02:09:25 2012
@@ -188,6 +188,8 @@
         NamePart = "_fetch_or";
       else if (op == AtomicRMWInst::Xor)
         NamePart = "_fetch_xor";
+      else if (op == AtomicRMWInst::Nand)
+        NamePart = "_fetch_nand";
       else
         continue;
       SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
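
A note on the ordering argument: every runtime call in the test diff below
passes the memory ordering as a trailing i32, and the constants used
(0 for monotonic, 2 for acquire, 3 for release, 4 for acq_rel, 5 for
seq_cst) line up with the default enumeration order of C++11
std::memory_order. A compile-time check of that correspondence:

    #include <atomic>
    // Values taken from the CHECK lines below; the gap at 1 corresponds
    // to memory_order_consume, which atomicrmw cannot express.
    static_assert(int(std::memory_order_relaxed) == 0, "monotonic");
    static_assert(int(std::memory_order_acquire) == 2, "acquire");
    static_assert(int(std::memory_order_release) == 3, "release");
    static_assert(int(std::memory_order_acq_rel) == 4, "acq_rel");
    static_assert(int(std::memory_order_seq_cst) == 5, "seq_cst");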

Modified: llvm/trunk/test/Instrumentation/ThreadSanitizer/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Instrumentation/ThreadSanitizer/atomic.ll?rev=168684&r1=168683&r2=168684&view=diff
==============================================================================
--- llvm/trunk/test/Instrumentation/ThreadSanitizer/atomic.ll (original)
+++ llvm/trunk/test/Instrumentation/ThreadSanitizer/atomic.ll Tue Nov 27 02:09:25 2012
@@ -114,6 +114,14 @@
 ; CHECK: atomic8_xor_monotonic
 ; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 0)
 
+define void @atomic8_nand_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_nand_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 0)
+
 define void @atomic8_xchg_acquire(i8* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i8* %a, i8 0 acquire
@@ -162,6 +170,14 @@
 ; CHECK: atomic8_xor_acquire
 ; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 2)
 
+define void @atomic8_nand_acquire(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i8* %a, i8 0 acquire
+  ret void
+}
+; CHECK: atomic8_nand_acquire
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 2)
+
 define void @atomic8_xchg_release(i8* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i8* %a, i8 0 release
@@ -210,6 +226,14 @@
 ; CHECK: atomic8_xor_release
 ; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 3)
 
+define void @atomic8_nand_release(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i8* %a, i8 0 release
+  ret void
+}
+; CHECK: atomic8_nand_release
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 3)
+
 define void @atomic8_xchg_acq_rel(i8* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i8* %a, i8 0 acq_rel
@@ -258,6 +282,14 @@
 ; CHECK: atomic8_xor_acq_rel
 ; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 4)
 
+define void @atomic8_nand_acq_rel(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i8* %a, i8 0 acq_rel
+  ret void
+}
+; CHECK: atomic8_nand_acq_rel
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 4)
+
 define void @atomic8_xchg_seq_cst(i8* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i8* %a, i8 0 seq_cst
@@ -306,6 +338,14 @@
 ; CHECK: atomic8_xor_seq_cst
 ; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 5)
 
+define void @atomic8_nand_seq_cst(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i8* %a, i8 0 seq_cst
+  ret void
+}
+; CHECK: atomic8_nand_seq_cst
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 5)
+
 define void @atomic8_cas_monotonic(i8* %a) nounwind uwtable {
 entry:
   cmpxchg i8* %a, i8 0, i8 1 monotonic
@@ -458,6 +498,14 @@
 ; CHECK: atomic16_xor_monotonic
 ; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 0)
 
+define void @atomic16_nand_monotonic(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i16* %a, i16 0 monotonic
+  ret void
+}
+; CHECK: atomic16_nand_monotonic
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 0)
+
 define void @atomic16_xchg_acquire(i16* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i16* %a, i16 0 acquire
@@ -506,6 +554,14 @@
 ; CHECK: atomic16_xor_acquire
 ; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 2)
 
+define void @atomic16_nand_acquire(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i16* %a, i16 0 acquire
+  ret void
+}
+; CHECK: atomic16_nand_acquire
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 2)
+
 define void @atomic16_xchg_release(i16* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i16* %a, i16 0 release
@@ -554,6 +610,14 @@
 ; CHECK: atomic16_xor_release
 ; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 3)
 
+define void @atomic16_nand_release(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i16* %a, i16 0 release
+  ret void
+}
+; CHECK: atomic16_nand_release
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 3)
+
 define void @atomic16_xchg_acq_rel(i16* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i16* %a, i16 0 acq_rel
@@ -602,6 +666,14 @@
 ; CHECK: atomic16_xor_acq_rel
 ; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 4)
 
+define void @atomic16_nand_acq_rel(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i16* %a, i16 0 acq_rel
+  ret void
+}
+; CHECK: atomic16_nand_acq_rel
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 4)
+
 define void @atomic16_xchg_seq_cst(i16* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i16* %a, i16 0 seq_cst
@@ -650,6 +722,14 @@
 ; CHECK: atomic16_xor_seq_cst
 ; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 5)
 
+define void @atomic16_nand_seq_cst(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i16* %a, i16 0 seq_cst
+  ret void
+}
+; CHECK: atomic16_nand_seq_cst
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 5)
+
 define void @atomic16_cas_monotonic(i16* %a) nounwind uwtable {
 entry:
   cmpxchg i16* %a, i16 0, i16 1 monotonic
@@ -802,6 +882,14 @@
 ; CHECK: atomic32_xor_monotonic
 ; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 0)
 
+define void @atomic32_nand_monotonic(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i32* %a, i32 0 monotonic
+  ret void
+}
+; CHECK: atomic32_nand_monotonic
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 0)
+
 define void @atomic32_xchg_acquire(i32* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i32* %a, i32 0 acquire
@@ -850,6 +938,14 @@
 ; CHECK: atomic32_xor_acquire
 ; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 2)
 
+define void @atomic32_nand_acquire(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i32* %a, i32 0 acquire
+  ret void
+}
+; CHECK: atomic32_nand_acquire
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 2)
+
 define void @atomic32_xchg_release(i32* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i32* %a, i32 0 release
@@ -898,6 +994,14 @@
 ; CHECK: atomic32_xor_release
 ; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 3)
 
+define void @atomic32_nand_release(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i32* %a, i32 0 release
+  ret void
+}
+; CHECK: atomic32_nand_release
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 3)
+
 define void @atomic32_xchg_acq_rel(i32* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i32* %a, i32 0 acq_rel
@@ -946,6 +1050,14 @@
 ; CHECK: atomic32_xor_acq_rel
 ; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 4)
 
+define void @atomic32_nand_acq_rel(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i32* %a, i32 0 acq_rel
+  ret void
+}
+; CHECK: atomic32_nand_acq_rel
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 4)
+
 define void @atomic32_xchg_seq_cst(i32* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i32* %a, i32 0 seq_cst
@@ -994,6 +1106,14 @@
 ; CHECK: atomic32_xor_seq_cst
 ; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 5)
 
+define void @atomic32_nand_seq_cst(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i32* %a, i32 0 seq_cst
+  ret void
+}
+; CHECK: atomic32_nand_seq_cst
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 5)
+
 define void @atomic32_cas_monotonic(i32* %a) nounwind uwtable {
 entry:
   cmpxchg i32* %a, i32 0, i32 1 monotonic
@@ -1146,6 +1266,14 @@
 ; CHECK: atomic64_xor_monotonic
 ; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 0)
 
+define void @atomic64_nand_monotonic(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i64* %a, i64 0 monotonic
+  ret void
+}
+; CHECK: atomic64_nand_monotonic
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 0)
+
 define void @atomic64_xchg_acquire(i64* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i64* %a, i64 0 acquire
@@ -1194,6 +1322,14 @@
 ; CHECK: atomic64_xor_acquire
 ; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 2)
 
+define void @atomic64_nand_acquire(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i64* %a, i64 0 acquire
+  ret void
+}
+; CHECK: atomic64_nand_acquire
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 2)
+
 define void @atomic64_xchg_release(i64* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i64* %a, i64 0 release
@@ -1242,6 +1378,14 @@
 ; CHECK: atomic64_xor_release
 ; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 3)
 
+define void @atomic64_nand_release(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i64* %a, i64 0 release
+  ret void
+}
+; CHECK: atomic64_nand_release
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 3)
+
 define void @atomic64_xchg_acq_rel(i64* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i64* %a, i64 0 acq_rel
@@ -1290,6 +1434,14 @@
 ; CHECK: atomic64_xor_acq_rel
 ; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 4)
 
+define void @atomic64_nand_acq_rel(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i64* %a, i64 0 acq_rel
+  ret void
+}
+; CHECK: atomic64_nand_acq_rel
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 4)
+
 define void @atomic64_xchg_seq_cst(i64* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i64* %a, i64 0 seq_cst
@@ -1338,6 +1490,14 @@
 ; CHECK: atomic64_xor_seq_cst
 ; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 5)
 
+define void @atomic64_nand_seq_cst(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i64* %a, i64 0 seq_cst
+  ret void
+}
+; CHECK: atomic64_nand_seq_cst
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 5)
+
 define void @atomic64_cas_monotonic(i64* %a) nounwind uwtable {
 entry:
   cmpxchg i64* %a, i64 0, i64 1 monotonic
@@ -1490,6 +1650,14 @@
 ; CHECK: atomic128_xor_monotonic
 ; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 0)
 
+define void @atomic128_nand_monotonic(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i128* %a, i128 0 monotonic
+  ret void
+}
+; CHECK: atomic128_nand_monotonic
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 0)
+
 define void @atomic128_xchg_acquire(i128* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i128* %a, i128 0 acquire
@@ -1538,6 +1706,14 @@
 ; CHECK: atomic128_xor_acquire
 ; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 2)
 
+define void @atomic128_nand_acquire(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i128* %a, i128 0 acquire
+  ret void
+}
+; CHECK: atomic128_nand_acquire
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 2)
+
 define void @atomic128_xchg_release(i128* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i128* %a, i128 0 release
@@ -1586,6 +1762,14 @@
 ; CHECK: atomic128_xor_release
 ; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 3)
 
+define void @atomic128_nand_release(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i128* %a, i128 0 release
+  ret void
+}
+; CHECK: atomic128_nand_release
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 3)
+
 define void @atomic128_xchg_acq_rel(i128* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i128* %a, i128 0 acq_rel
@@ -1634,6 +1818,14 @@
 ; CHECK: atomic128_xor_acq_rel
 ; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 4)
 
+define void @atomic128_nand_acq_rel(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i128* %a, i128 0 acq_rel
+  ret void
+}
+; CHECK: atomic128_nand_acq_rel
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 4)
+
 define void @atomic128_xchg_seq_cst(i128* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i128* %a, i128 0 seq_cst
@@ -1682,6 +1874,14 @@
 ; CHECK: atomic128_xor_seq_cst
 ; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 5)
 
+define void @atomic128_nand_seq_cst(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i128* %a, i128 0 seq_cst
+  ret void
+}
+; CHECK: atomic128_nand_seq_cst
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 5)
+
 define void @atomic128_cas_monotonic(i128* %a) nounwind uwtable {
 entry:
   cmpxchg i128* %a, i128 0, i128 1 monotonic
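
For a sense of what this change catches at the source level: the GCC-style
__atomic_fetch_nand builtin (supported by clang as well) compiles down to
an "atomicrmw nand" instruction. Before this commit, the "else continue;"
in the setup loop above registered no runtime function for that opcode, so
such operations presumably went uninstrumented. A small example
(hypothetical function name):

    #include <cstdint>
    // Built with -fsanitize=thread, the atomicrmw nand produced here is
    // now rewritten into a call to __tsan_atomic8_fetch_nand.
    uint8_t fetch_nand_seq_cst(uint8_t *p, uint8_t v) {
      return __atomic_fetch_nand(p, v, __ATOMIC_SEQ_CST);
    }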
