[llvm] r262876 - [tsan] Add support for pointer typed atomic stores, loads, and cmpxchg

Anna Zaks via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 7 15:16:23 PST 2016


Author: zaks
Date: Mon Mar  7 17:16:23 2016
New Revision: 262876

URL: http://llvm.org/viewvc/llvm-project?rev=262876&view=rev
Log:
[tsan] Add support for pointer typed atomic stores, loads, and cmpxchg

TSan instrumentation functions for atomic stores, loads, and cmpxchg operate
only on integer value types. This patch adds casts before calling the TSan
instrumentation functions when the value is a pointer: pointer operands are
cast to integers with ptrtoint before the call, and integer results are cast
back with inttoptr where the original instruction produced a pointer.

Differential Revision: http://reviews.llvm.org/D17833
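
For context, here is a minimal C++ snippet (illustrative only, not from the
patch or its tests) of the kind of source that can reach these paths when
built with -fsanitize=thread. Note that whether a frontend emits
pointer-typed atomic IR operations or pre-casts to integers itself varies,
which is why the instrumentation must handle both:

  #include <atomic>

  int g;

  // Pointer-typed atomic store, cmpxchg, and load: when these survive to
  // IR as atomics on a pointer type, TSan must cast to and from the
  // integer type its runtime functions expect.
  int *ptr_atomics(std::atomic<int *> &p) {
    int *expected = nullptr;
    p.store(&g, std::memory_order_seq_cst);
    p.compare_exchange_strong(expected, &g);
    return p.load(std::memory_order_seq_cst);
  }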

Modified:
    llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
    llvm/trunk/test/Instrumentation/ThreadSanitizer/atomic.ll

Modified: llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp?rev=262876&r1=262875&r2=262876&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp Mon Mar  7 17:16:23 2016
@@ -496,6 +496,11 @@ bool ThreadSanitizer::instrumentMemIntri
   return false;
 }
 
+static Value *createIntOrPtrToIntCast(Value *V, Type* Ty, IRBuilder<> &IRB) {
+  return isa<PointerType>(V->getType()) ?
+    IRB.CreatePtrToInt(V, Ty) : IRB.CreateIntCast(V, Ty, false);
+}
+
 // Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
 // standards.  For background see C++11 standard.  A slightly older, publicly
 // available draft of the standard (not entirely up-to-date, but close enough
@@ -517,9 +522,16 @@ bool ThreadSanitizer::instrumentAtomic(I
     Type *PtrTy = Ty->getPointerTo();
     Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                      createOrdering(&IRB, LI->getOrdering())};
-    CallInst *C = CallInst::Create(TsanAtomicLoad[Idx], Args);
-    ReplaceInstWithInst(I, C);
-
+    Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
+    if (Ty == OrigTy) {
+      Instruction *C = CallInst::Create(TsanAtomicLoad[Idx], Args);
+      ReplaceInstWithInst(I, C);
+    } else {
+      // We are loading a pointer, so we need to cast the return value.
+      Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
+      Instruction *Cast = CastInst::Create(Instruction::IntToPtr, C, OrigTy);
+      ReplaceInstWithInst(I, Cast);
+    }
   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
     Value *Addr = SI->getPointerOperand();
     int Idx = getMemoryAccessFuncIndex(Addr, DL);
@@ -530,7 +542,7 @@ bool ThreadSanitizer::instrumentAtomic(I
     Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
     Type *PtrTy = Ty->getPointerTo();
     Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
-                     IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
+                     createIntOrPtrToIntCast(SI->getValueOperand(), Ty, IRB),
                      createOrdering(&IRB, SI->getOrdering())};
     CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
     ReplaceInstWithInst(I, C);
@@ -560,15 +572,26 @@ bool ThreadSanitizer::instrumentAtomic(I
     const unsigned BitSize = ByteSize * 8;
     Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
     Type *PtrTy = Ty->getPointerTo();
+    Value *CmpOperand =
+      createIntOrPtrToIntCast(CASI->getCompareOperand(), Ty, IRB);
+    Value *NewOperand =
+      createIntOrPtrToIntCast(CASI->getNewValOperand(), Ty, IRB);
     Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
-                     IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
-                     IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
+                     CmpOperand,
+                     NewOperand,
                      createOrdering(&IRB, CASI->getSuccessOrdering()),
                      createOrdering(&IRB, CASI->getFailureOrdering())};
     CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
-    Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand());
+    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
+    Value *OldVal = C;
+    Type *OrigOldValTy = CASI->getNewValOperand()->getType();
+    if (Ty != OrigOldValTy) {
+      // The value is a pointer, so we need to cast the return value.
+      OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
+    }
 
-    Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0);
+    Value *Res =
+      IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
     Res = IRB.CreateInsertValue(Res, Success, 1);
 
     I->replaceAllUsesWith(Res);

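All three changed sites above follow the same pattern: perform the runtime
call in the integer domain and cast at the boundaries. Below is a
compile-only sketch of that pattern (assuming LLVM headers of this era; the
function names are local to the sketch, not part of the patch):

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Instructions.h"
  #include "llvm/Transforms/Utils/BasicBlockUtils.h"
  using namespace llvm;

  // Load path: call the __tsan_atomicN_load runtime function, then
  // inttoptr the integer result back to the original pointer type.
  static void replacePointerAtomicLoad(Instruction *I, Function *TsanLoad,
                                       ArrayRef<Value *> Args, Type *OrigTy,
                                       IRBuilder<> &IRB) {
    Value *C = IRB.CreateCall(TsanLoad, Args);
    ReplaceInstWithInst(I, CastInst::Create(Instruction::IntToPtr, C, OrigTy));
  }

  // CAS path: the success flag is computed against the already-casted
  // expected value, and only the returned old value is cast back.
  static Value *rebuildCASResult(IRBuilder<> &IRB, Value *C, Value *CmpOperand,
                                 AtomicCmpXchgInst *CASI, Type *Ty) {
    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
    Value *OldVal = C;
    Type *OrigTy = CASI->getNewValOperand()->getType();
    if (Ty != OrigTy)
      OldVal = IRB.CreateIntToPtr(C, OrigTy);
    Value *Res =
        IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
    return IRB.CreateInsertValue(Res, Success, 1);
  }

One detail worth noting: the icmp now compares the runtime's integer return
value against CmpOperand (the ptrtoint'ed expected value) rather than the
original operand, since comparing an iN against a pointer would not type
check.
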
Modified: llvm/trunk/test/Instrumentation/ThreadSanitizer/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Instrumentation/ThreadSanitizer/atomic.ll?rev=262876&r1=262875&r2=262876&view=diff
==============================================================================
--- llvm/trunk/test/Instrumentation/ThreadSanitizer/atomic.ll (original)
+++ llvm/trunk/test/Instrumentation/ThreadSanitizer/atomic.ll Mon Mar  7 17:16:23 2016
@@ -1186,6 +1186,16 @@ entry:
 ; CHECK-LABEL: atomic64_load_seq_cst
 ; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 5), !dbg
 
+define i8* @atomic64_load_seq_cst_ptr_ty(i8** %a) nounwind uwtable {
+entry:
+  %0 = load atomic i8*, i8** %a seq_cst, align 8, !dbg !7
+  ret i8* %0, !dbg !7
+}
+; CHECK-LABEL: atomic64_load_seq_cst_ptr_ty
+; CHECK: bitcast i8** %{{.+}} to i64*
+; CHECK-NEXT: call i64 @__tsan_atomic64_load(i64* %{{.+}}, i32 5), !dbg
+; CHECK-NEXT: inttoptr i64 %{{.+}} to i8*
+
 define void @atomic64_store_unordered(i64* %a) nounwind uwtable {
 entry:
   store atomic i64 0, i64* %a unordered, align 8, !dbg !7
@@ -1218,6 +1228,16 @@ entry:
 ; CHECK-LABEL: atomic64_store_seq_cst
 ; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 5), !dbg
 
+define void @atomic64_store_seq_cst_ptr_ty(i8** %a, i8* %v) nounwind uwtable {
+entry:
+  store atomic i8* %v, i8** %a seq_cst, align 8, !dbg !7
+  ret void, !dbg !7
+}
+; CHECK-LABEL: atomic64_store_seq_cst_ptr_ty
+; CHECK: %{{.*}} = bitcast i8** %{{.*}} to i64*
+; CHECK-NEXT: %{{.*}} = ptrtoint i8* %{{.*}} to i64
+; CHECK-NEXT: call void @__tsan_atomic64_store(i64* %{{.*}}, i64 %{{.*}}, i32 5), !dbg
+
 define void @atomic64_xchg_monotonic(i64* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i64* %a, i64 0 monotonic, !dbg !7
@@ -1538,6 +1558,21 @@ entry:
 ; CHECK-LABEL: atomic64_cas_seq_cst
 ; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5, i32 5), !dbg
 
+define void @atomic64_cas_seq_cst_ptr_ty(i8** %a, i8* %v1, i8* %v2) nounwind uwtable {
+entry:
+  cmpxchg i8** %a, i8* %v1, i8* %v2 seq_cst seq_cst, !dbg !7
+  ret void
+}
+; CHECK-LABEL: atomic64_cas_seq_cst_ptr_ty
+; CHECK: {{.*}} = ptrtoint i8* %v1 to i64
+; CHECK-NEXT: {{.*}} = ptrtoint i8* %v2 to i64
+; CHECK-NEXT: {{.*}} = bitcast i8** %a to i64*
+; CHECK-NEXT: {{.*}} = call i64 @__tsan_atomic64_compare_exchange_val(i64* {{.*}}, i64 {{.*}}, i64 {{.*}}, i32 5, i32 5), !dbg
+; CHECK-NEXT: {{.*}} = icmp eq i64
+; CHECK-NEXT: {{.*}} = inttoptr i64 {{.*}} to i8*
+; CHECK-NEXT: {{.*}} = insertvalue { i8*, i1 } undef, i8* {{.*}}, 0
+; CHECK-NEXT: {{.*}} = insertvalue { i8*, i1 } {{.*}}, i1 {{.*}}, 1
+
 define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
 entry:
   %0 = load atomic i128, i128* %a unordered, align 16, !dbg !7

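A note on the expected ordering in the cmpxchg checks above: the ptrtoint
casts of %v1 and %v2 appear before the bitcast of %a because the new code
builds CmpOperand and NewOperand before the Args array, whose first element
is the pointer cast of the address. The load and store checks likewise
mirror the order in which instrumentAtomic creates the casts.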