[llvm] r223464 - [msan] Avoid extra origin address realignment.
Evgeniy Stepanov
eugeni.stepanov at gmail.com
Fri Dec 5 06:34:04 PST 2014
Author: eugenis
Date: Fri Dec 5 08:34:03 2014
New Revision: 223464
URL: http://llvm.org/viewvc/llvm-project?rev=223464&view=rev
Log:
[msan] Avoid extra origin address realignment.
Do not realign the origin address if the corresponding application
address is already at least 4-byte aligned.
Saves 2.5% code size in track-origins mode.
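
For reference, here is a minimal standalone sketch of the address arithmetic
involved (the mapping constants and the originAddr helper below are
illustrative only, not the values MemorySanitizer.cpp actually uses). The
origin address is derived from the application address and keeps its low
bits, so assuming the origin offset itself is 4-byte aligned, an application
access that is already 4-byte aligned yields a 4-byte-aligned origin address
and the trailing realigning AND can be omitted.

#include <cstdint>
#include <cstdio>

// Illustrative mapping parameters; the real ones are target-specific and
// configured in MemorySanitizer.cpp.
constexpr uint64_t kShadowMask = 0x400000000000ULL;
constexpr uint64_t kOriginOffset = 0x200000000000ULL;
constexpr uint64_t kMinOriginAlignment = 4;

// Sketch of the logic getOriginPtr() now emits: realign the origin address
// downwards only when the application access is not already known to be
// at least 4-byte aligned.
uint64_t originAddr(uint64_t AppAddr, uint64_t Alignment) {
  uint64_t Shadow = AppAddr & ~kShadowMask;   // shadow address
  uint64_t Origin = Shadow + kOriginOffset;   // origin address, maybe unaligned
  if (Alignment < kMinOriginAlignment)
    Origin &= ~(kMinOriginAlignment - 1);     // the extra AND this change avoids
  return Origin;
}

int main() {
  uint64_t Addr = 0x7fff12345678ULL;
  // 4-byte-aligned access: low bits are already zero, no realignment needed.
  printf("aligned:   %#llx\n", (unsigned long long)originAddr(Addr & ~3ULL, 4));
  // 1-byte-aligned access: the low two bits of the origin address get cleared.
  printf("unaligned: %#llx\n", (unsigned long long)originAddr(Addr, 1));
  return 0;
}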
Added:
llvm/trunk/test/Instrumentation/MemorySanitizer/origin-alignment.ll
Modified:
llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
Modified: llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp?rev=223464&r1=223463&r2=223464&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp Fri Dec 5 08:34:03 2014
@@ -522,9 +522,11 @@ struct MemorySanitizerVisitor : public I
void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
unsigned Alignment, bool AsCall) {
+ unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
if (isa<StructType>(Shadow->getType())) {
- IRB.CreateAlignedStore(updateOrigin(Origin, IRB), getOriginPtr(Addr, IRB),
- Alignment);
+ IRB.CreateAlignedStore(updateOrigin(Origin, IRB),
+ getOriginPtr(Addr, IRB, Alignment),
+ OriginAlignment);
} else {
Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
// TODO(eugenis): handle non-zero constant shadow by inserting an
@@ -549,7 +551,8 @@ struct MemorySanitizerVisitor : public I
Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
IRBuilder<> IRBNew(CheckTerm);
IRBNew.CreateAlignedStore(updateOrigin(Origin, IRBNew),
- getOriginPtr(Addr, IRBNew), Alignment);
+ getOriginPtr(Addr, IRBNew, Alignment),
+ OriginAlignment);
}
}
}
@@ -573,11 +576,9 @@ struct MemorySanitizerVisitor : public I
if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
- if (MS.TrackOrigins) {
- unsigned Alignment = std::max(kMinOriginAlignment, SI.getAlignment());
- storeOrigin(IRB, Addr, Shadow, getOrigin(Val), Alignment,
+ if (MS.TrackOrigins)
+ storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI.getAlignment(),
InstrumentWithCalls);
- }
}
}
@@ -739,16 +740,17 @@ struct MemorySanitizerVisitor : public I
/// address.
///
/// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
- Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
+ Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB, unsigned Alignment) {
Value *ShadowLong =
- IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
- Value *Add =
- IRB.CreateAdd(ShadowLong,
- ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
- Value *SecondAnd =
- IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
- return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
+ IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
+ ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
+ Value *Origin = IRB.CreateAdd(
+ ShadowLong, ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
+ if (Alignment < kMinOriginAlignment) {
+ uint64_t Mask = kMinOriginAlignment - 1;
+ Origin = IRB.CreateAnd(Origin, ConstantInt::get(MS.IntptrTy, ~Mask));
+ }
+ return IRB.CreateIntToPtr(Origin, PointerType::get(IRB.getInt32Ty(), 0));
}
/// \brief Compute the shadow address for a given function argument.
@@ -1052,9 +1054,10 @@ struct MemorySanitizerVisitor : public I
if (MS.TrackOrigins) {
if (PropagateShadow) {
- unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
- setOrigin(&I,
- IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
+ unsigned Alignment = I.getAlignment();
+ unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
+ setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB, Alignment),
+ OriginAlignment));
} else {
setOrigin(&I, getCleanOrigin());
}
@@ -1706,7 +1709,7 @@ struct MemorySanitizerVisitor : public I
// FIXME: use ClStoreCleanOrigin
// FIXME: factor out common code from materializeStores
if (MS.TrackOrigins)
- IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
+ IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB, 1));
return true;
}
@@ -1733,7 +1736,7 @@ struct MemorySanitizerVisitor : public I
if (MS.TrackOrigins) {
if (PropagateShadow)
- setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
+ setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB, 1)));
else
setOrigin(&I, getCleanOrigin());
}
Added: llvm/trunk/test/Instrumentation/MemorySanitizer/origin-alignment.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Instrumentation/MemorySanitizer/origin-alignment.ll?rev=223464&view=auto
==============================================================================
--- llvm/trunk/test/Instrumentation/MemorySanitizer/origin-alignment.ll (added)
+++ llvm/trunk/test/Instrumentation/MemorySanitizer/origin-alignment.ll Fri Dec 5 08:34:03 2014
@@ -0,0 +1,73 @@
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS1 %s
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS2 %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+
+; Check origin instrumentation of stores.
+; Check that debug info for origin propagation code is set correctly.
+
+@a8 = global i8 0, align 8
+@a4 = global i8 0, align 4
+@a2 = global i8 0, align 2
+@a1 = global i8 0, align 1
+
+; 8-aligned store => 8-aligned origin store, origin address is not realigned
+define void @Store8(i8 %x) sanitize_memory {
+entry:
+ store i8 %x, i8* @a8, align 8
+ ret void
+}
+
+; CHECK-LABEL: @Store8
+; CHECK-ORIGINS1: [[ORIGIN:%[01-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
+; CHECK-ORIGINS2: [[ORIGIN0:%[01-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
+; CHECK-ORIGINS2: [[ORIGIN:%[01-9a-z]+]] = call i32 @__msan_chain_origin(i32 [[ORIGIN0]])
+; CHECK: store i32 [[ORIGIN]], i32* inttoptr (i64 add (i64 and (i64 ptrtoint {{.*}} to i32*), align 8
+; CHECK: ret void
+
+
+; 4-aligned store => 4-aligned origin store, origin address is not realigned
+define void @Store4(i8 %x) sanitize_memory {
+entry:
+ store i8 %x, i8* @a4, align 4
+ ret void
+}
+
+; CHECK-LABEL: @Store4
+; CHECK-ORIGINS1: [[ORIGIN:%[01-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
+; CHECK-ORIGINS2: [[ORIGIN0:%[01-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
+; CHECK-ORIGINS2: [[ORIGIN:%[01-9a-z]+]] = call i32 @__msan_chain_origin(i32 [[ORIGIN0]])
+; CHECK: store i32 [[ORIGIN]], i32* inttoptr (i64 add (i64 and (i64 ptrtoint {{.*}} to i32*), align 4
+; CHECK: ret void
+
+
+; 2-aligned store => 4-aligned origin store, origin address is realigned
+define void @Store2(i8 %x) sanitize_memory {
+entry:
+ store i8 %x, i8* @a2, align 2
+ ret void
+}
+
+; CHECK-LABEL: @Store2
+; CHECK-ORIGINS1: [[ORIGIN:%[01-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
+; CHECK-ORIGINS2: [[ORIGIN0:%[01-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
+; CHECK-ORIGINS2: [[ORIGIN:%[01-9a-z]+]] = call i32 @__msan_chain_origin(i32 [[ORIGIN0]])
+; CHECK: store i32 [[ORIGIN]], i32* inttoptr (i64 and (i64 add (i64 and (i64 ptrtoint {{.*}} i64 -4) to i32*), align 4
+; CHECK: ret void
+
+
+; 1-aligned store => 4-aligned origin store, origin address is realigned
+define void @Store1(i8 %x) sanitize_memory {
+entry:
+ store i8 %x, i8* @a1, align 1
+ ret void
+}
+
+; CHECK-LABEL: @Store1
+; CHECK-ORIGINS1: [[ORIGIN:%[01-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
+; CHECK-ORIGINS2: [[ORIGIN0:%[01-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
+; CHECK-ORIGINS2: [[ORIGIN:%[01-9a-z]+]] = call i32 @__msan_chain_origin(i32 [[ORIGIN0]])
+; CHECK: store i32 [[ORIGIN]], i32* inttoptr (i64 and (i64 add (i64 and (i64 ptrtoint {{.*}} i64 -4) to i32*), align 4
+; CHECK: ret void