[llvm] r227231 - tsan: properly instrument unaligned accesses
Dmitry Vyukov
dvyukov at google.com
Tue Jan 27 12:19:17 PST 2015
Author: dvyukov
Date: Tue Jan 27 14:19:17 2015
New Revision: 227231
URL: http://llvm.org/viewvc/llvm-project?rev=227231&view=rev
Log:
tsan: properly instrument unaligned accesses
If a memory access is unaligned, emit __tsan_unaligned_read/write
callbacks instead of __tsan_read/write.
This required changing the semantics of __tsan_unaligned_read/write so that they no longer perform the user memory access themselves.
But since they were unused (other than through __sanitizer_unaligned_load/store), this is fine.
Fixes long-standing issue 17:
https://code.google.com/p/thread-sanitizer/issues/detail?id=17
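For context, a minimal C++ example (not part of this commit; the names Packed and ReadPacked are illustrative) of source code that produces the under-aligned accesses now routed to the unaligned callbacks:

// Illustrative only: a packed struct member is not naturally aligned, so
// clang emits an "align 1" load for p->x. With this patch, TSan instruments
// that load with __tsan_unaligned_read4 instead of __tsan_read4 when the
// code is built with -fsanitize=thread.
struct __attribute__((packed)) Packed {
  char c;
  int x;  // placed at offset 1, so accesses to it are 1-byte aligned
};

int ReadPacked(const Packed *p) {
  return p->x;
}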
Added:
llvm/trunk/test/Instrumentation/ThreadSanitizer/unaligned.ll
Modified:
llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
Modified: llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp?rev=227231&r1=227230&r2=227231&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp Tue Jan 27 14:19:17 2015
@@ -99,6 +99,8 @@ struct ThreadSanitizer : public Function
static const size_t kNumberOfAccessSizes = 5;
Function *TsanRead[kNumberOfAccessSizes];
Function *TsanWrite[kNumberOfAccessSizes];
+ Function *TsanUnalignedRead[kNumberOfAccessSizes];
+ Function *TsanUnalignedWrite[kNumberOfAccessSizes];
Function *TsanAtomicLoad[kNumberOfAccessSizes];
Function *TsanAtomicStore[kNumberOfAccessSizes];
Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
@@ -150,6 +152,16 @@ void ThreadSanitizer::initializeCallback
TsanWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
+ SmallString<64> UnalignedReadName("__tsan_unaligned_read" +
+ itostr(ByteSize));
+ TsanUnalignedRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
+ UnalignedReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
+
+ SmallString<64> UnalignedWriteName("__tsan_unaligned_write" +
+ itostr(ByteSize));
+ TsanUnalignedWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
+ UnalignedWriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
+
Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
Type *PtrTy = Ty->getPointerTo();
SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) +
@@ -412,7 +424,16 @@ bool ThreadSanitizer::instrumentLoadOrSt
NumInstrumentedVtableReads++;
return true;
}
- Value *OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
+ const unsigned Alignment = IsWrite
+ ? cast<StoreInst>(I)->getAlignment()
+ : cast<LoadInst>(I)->getAlignment();
+ Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
+ const uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
+ Value *OnAccessFunc = nullptr;
+ if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
+ OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
+ else
+ OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
if (IsWrite) NumInstrumentedWrites++;
else NumInstrumentedReads++;
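The callback selection added above can be summarized by the following standalone sketch (illustrative, not part of the patch). The rationale for accepting Alignment >= 8 is presumably that the TSan runtime tracks memory at 8-byte shadow granularity, so such accesses behave like aligned ones from the shadow's point of view:

// Sketch of the predicate used in instrumentLoadOrStore (simplified).
// Alignment == 0 means the IR load/store uses the type's natural ABI
// alignment; an alignment that is a multiple of the access size is fully
// aligned; Alignment >= 8 is also treated as aligned (see note above).
static bool useAlignedCallback(unsigned Alignment, uint32_t TypeSizeInBits) {
  const uint32_t ByteSize = TypeSizeInBits / 8;
  return Alignment == 0 || Alignment >= 8 || (Alignment % ByteSize) == 0;
}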
Added: llvm/trunk/test/Instrumentation/ThreadSanitizer/unaligned.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Instrumentation/ThreadSanitizer/unaligned.ll?rev=227231&view=auto
==============================================================================
--- llvm/trunk/test/Instrumentation/ThreadSanitizer/unaligned.ll (added)
+++ llvm/trunk/test/Instrumentation/ThreadSanitizer/unaligned.ll Tue Jan 27 14:19:17 2015
@@ -0,0 +1,143 @@
+; RUN: opt < %s -tsan -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+define i16 @test_unaligned_read2(i16* %a) sanitize_thread {
+entry:
+ %tmp1 = load i16* %a, align 1
+ ret i16 %tmp1
+}
+
+; CHECK-LABEL: define i16 @test_unaligned_read2(i16* %a)
+; CHECK: call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT: %1 = bitcast i16* %a to i8*
+; CHECK-NEXT: call void @__tsan_unaligned_read2(i8* %1)
+; CHECK-NEXT: %tmp1 = load i16* %a, align 1
+; CHECK-NEXT: call void @__tsan_func_exit()
+; CHECK: ret i16
+
+define i32 @test_unaligned_read4(i32* %a) sanitize_thread {
+entry:
+ %tmp1 = load i32* %a, align 2
+ ret i32 %tmp1
+}
+
+; CHECK-LABEL: define i32 @test_unaligned_read4(i32* %a)
+; CHECK: call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT: %1 = bitcast i32* %a to i8*
+; CHECK-NEXT: call void @__tsan_unaligned_read4(i8* %1)
+; CHECK-NEXT: %tmp1 = load i32* %a, align 2
+; CHECK-NEXT: call void @__tsan_func_exit()
+; CHECK: ret i32
+
+define i64 @test_unaligned_read8(i64* %a) sanitize_thread {
+entry:
+ %tmp1 = load i64* %a, align 4
+ ret i64 %tmp1
+}
+
+; CHECK-LABEL: define i64 @test_unaligned_read8(i64* %a)
+; CHECK: call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT: %1 = bitcast i64* %a to i8*
+; CHECK-NEXT: call void @__tsan_unaligned_read8(i8* %1)
+; CHECK-NEXT: %tmp1 = load i64* %a, align 4
+; CHECK-NEXT: call void @__tsan_func_exit()
+; CHECK: ret i64
+
+define i128 @test_unaligned_read16(i128* %a) sanitize_thread {
+entry:
+ %tmp1 = load i128* %a, align 1
+ ret i128 %tmp1
+}
+
+; CHECK-LABEL: define i128 @test_unaligned_read16(i128* %a)
+; CHECK: call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT: %1 = bitcast i128* %a to i8*
+; CHECK-NEXT: call void @__tsan_unaligned_read16(i8* %1)
+; CHECK-NEXT: %tmp1 = load i128* %a, align 1
+; CHECK-NEXT: call void @__tsan_func_exit()
+; CHECK: ret i128
+
+define i128 @test_aligned_read16(i128* %a) sanitize_thread {
+entry:
+ %tmp1 = load i128* %a, align 8
+ ret i128 %tmp1
+}
+
+; CHECK-LABEL: define i128 @test_aligned_read16(i128* %a)
+; CHECK: call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT: %1 = bitcast i128* %a to i8*
+; CHECK-NEXT: call void @__tsan_read16(i8* %1)
+; CHECK-NEXT: %tmp1 = load i128* %a, align 8
+; CHECK-NEXT: call void @__tsan_func_exit()
+; CHECK: ret i128
+
+define void @test_unaligned_write2(i16* %a) sanitize_thread {
+entry:
+ store i16 1, i16* %a, align 1
+ ret void
+}
+
+; CHECK-LABEL: define void @test_unaligned_write2(i16* %a)
+; CHECK: call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT: %1 = bitcast i16* %a to i8*
+; CHECK-NEXT: call void @__tsan_unaligned_write2(i8* %1)
+; CHECK-NEXT: store i16 1, i16* %a, align 1
+; CHECK-NEXT: call void @__tsan_func_exit()
+; CHECK: ret void
+
+define void @test_unaligned_write4(i32* %a) sanitize_thread {
+entry:
+ store i32 1, i32* %a, align 1
+ ret void
+}
+
+; CHECK-LABEL: define void @test_unaligned_write4(i32* %a)
+; CHECK: call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT: %1 = bitcast i32* %a to i8*
+; CHECK-NEXT: call void @__tsan_unaligned_write4(i8* %1)
+; CHECK-NEXT: store i32 1, i32* %a, align 1
+; CHECK-NEXT: call void @__tsan_func_exit()
+; CHECK: ret void
+
+define void @test_unaligned_write8(i64* %a) sanitize_thread {
+entry:
+ store i64 1, i64* %a, align 1
+ ret void
+}
+
+; CHECK-LABEL: define void @test_unaligned_write8(i64* %a)
+; CHECK: call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT: %1 = bitcast i64* %a to i8*
+; CHECK-NEXT: call void @__tsan_unaligned_write8(i8* %1)
+; CHECK-NEXT: store i64 1, i64* %a, align 1
+; CHECK-NEXT: call void @__tsan_func_exit()
+; CHECK: ret void
+
+define void @test_unaligned_write16(i128* %a) sanitize_thread {
+entry:
+ store i128 1, i128* %a, align 1
+ ret void
+}
+
+; CHECK-LABEL: define void @test_unaligned_write16(i128* %a)
+; CHECK: call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT: %1 = bitcast i128* %a to i8*
+; CHECK-NEXT: call void @__tsan_unaligned_write16(i8* %1)
+; CHECK-NEXT: store i128 1, i128* %a, align 1
+; CHECK-NEXT: call void @__tsan_func_exit()
+; CHECK: ret void
+
+define void @test_aligned_write16(i128* %a) sanitize_thread {
+entry:
+ store i128 1, i128* %a, align 8
+ ret void
+}
+
+; CHECK-LABEL: define void @test_aligned_write16(i128* %a)
+; CHECK: call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT: %1 = bitcast i128* %a to i8*
+; CHECK-NEXT: call void @__tsan_write16(i8* %1)
+; CHECK-NEXT: store i128 1, i128* %a, align 8
+; CHECK-NEXT: call void @__tsan_func_exit()
+; CHECK: ret void
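For reference, the calls checked above target the following runtime entry points in compiler-rt. The prototypes below are inferred from the i8* argument emitted by the pass; the authoritative declarations live in tsan_interface.h, where pointer constness may differ:

extern "C" {
// Unaligned counterparts of __tsan_readN/__tsan_writeN. Unlike
// __sanitizer_unaligned_load/store, they no longer perform the user
// memory access themselves (see the log message above).
void __tsan_unaligned_read2(void *addr);
void __tsan_unaligned_read4(void *addr);
void __tsan_unaligned_read8(void *addr);
void __tsan_unaligned_read16(void *addr);
void __tsan_unaligned_write2(void *addr);
void __tsan_unaligned_write4(void *addr);
void __tsan_unaligned_write8(void *addr);
void __tsan_unaligned_write16(void *addr);
}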
More information about the llvm-commits mailing list