[llvm-branch-commits] [llvm] e7f3509 - [msan][aarch64] Fix cleanup of unused part of overflow area
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Nov 17 17:01:32 PST 2023
Author: Vitaly Buka
Date: 2023-11-17T16:48:05-08:00
New Revision: e7f350951b304f0426832b0b6482c8bedf239c23
URL: https://github.com/llvm/llvm-project/commit/e7f350951b304f0426832b0b6482c8bedf239c23
DIFF: https://github.com/llvm/llvm-project/commit/e7f350951b304f0426832b0b6482c8bedf239c23.diff
LOG: [msan][aarch64] Fix cleanup of unused part of overflow area
Similar to a05e736d288a7f2009ee9d057e78713d9adeeb5f.
Reviewers: thurstond, kstoimenov
Reviewed By: thurstond
Pull Request: https://github.com/llvm/llvm-project/pull/72722
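For context (not from the patch itself): on AArch64, variadic arguments that do not fit in the general-purpose or vector register save areas are passed in the stack overflow area, and MSan mirrors their shadow into the fixed-size __msan_va_arg_tls buffer (kParamTLSSize, 800 bytes). A call passing enough large variadic arguments runs past the end of that buffer, and the AArch64 handler previously left the still-in-bounds tail of the buffer stale instead of zeroing it. A hypothetical reproducer in the spirit of the [4 x fp128] case from the updated test, with names and sizes chosen for illustration only:

#include <cstdarg>
#include <cstdio>

// Roughly 64 bytes per value on AArch64 Linux (long double is a 128-bit fp128 there).
struct LongDouble4 { long double a, b, c, d; };

static void sink(int n, ...) {
  va_list ap;
  va_start(ap, n);
  for (int i = 0; i < n; ++i) {
    LongDouble4 v = va_arg(ap, LongDouble4);
    // The shadow for v comes from the va_list backup copied out of __msan_va_arg_tls.
    std::printf("%Lf\n", v.a);
  }
  va_end(ap);
}

int main() {
  LongDouble4 x{1, 2, 3, 4};
  // 20 variadic aggregates need about 20 * 64 = 1280 shadow bytes, well past the
  // 800-byte __msan_va_arg_tls buffer, so the unused tail must be cleaned.
  sink(20, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x);
  return 0;
}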
Added:
Modified:
llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 06cccab322100f6..170e267356d58ec 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -4696,6 +4696,19 @@ struct VarArgHelperBase : public VarArgHelper {
"_msarg_va_o");
}
+ void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
+ unsigned BaseOffset) {
+ // The tail of __msan_va_arg_tls is not large enough to fit the full
+ // value shadow, but it will be copied to the backup anyway. Make it
+ // clean.
+ if (BaseOffset >= kParamTLSSize)
+ return;
+ Value *TailSize =
+ ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
+ IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
+ TailSize, Align(8));
+ }
+
void unpoisonVAListTagForInst(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *VAListTag = I.getArgOperand(0);
@@ -4779,23 +4792,6 @@ struct VarArgAMD64Helper : public VarArgHelperBase {
unsigned OverflowOffset = AMD64FpEndOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
- auto CleanUnusedTLS = [&](Value *ShadowBase, unsigned BaseOffset) {
- // Make sure we don't overflow __msan_va_arg_tls.
- if (OverflowOffset <= kParamTLSSize)
- return false; // Not needed, end is not reacheed.
-
- // The tails of __msan_va_arg_tls is not large enough to fit full
- // value shadow, but it will be copied to backup anyway. Make it
- // clean.
- if (BaseOffset < kParamTLSSize) {
- Value *TailSize = ConstantInt::getSigned(IRB.getInt32Ty(),
- kParamTLSSize - BaseOffset);
- IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
- TailSize, Align(8));
- }
- return true; // Incomplete
- };
-
for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
@@ -4817,8 +4813,10 @@ struct VarArgAMD64Helper : public VarArgHelperBase {
OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
OverflowOffset += AlignedSize;
- if (CleanUnusedTLS(ShadowBase, BaseOffset))
+ if (OverflowOffset > kParamTLSSize) {
+ CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
continue; // We have no space to copy shadow there.
+ }
Value *ShadowPtr, *OriginPtr;
std::tie(ShadowPtr, OriginPtr) =
@@ -4863,8 +4861,11 @@ struct VarArgAMD64Helper : public VarArgHelperBase {
OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
}
OverflowOffset += AlignedSize;
- if (CleanUnusedTLS(ShadowBase, BaseOffset))
- continue; // We have no space to copy shadow there.
+ if (OverflowOffset > kParamTLSSize) {
+ // We have no space to copy shadow there.
+ CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
+ continue;
+ }
}
// Take fixed arguments into account for GpOffset and FpOffset,
// but don't actually store shadows for them.
@@ -5118,17 +5119,21 @@ struct VarArgAArch64Helper : public VarArgHelperBase {
if (IsFixed)
continue;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
- Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset,
- alignTo(ArgSize, 8));
- OverflowOffset += alignTo(ArgSize, 8);
+ uint64_t AlignedSize = alignTo(ArgSize, 8);
+ unsigned BaseOffset = OverflowOffset;
+ Base = getShadowPtrForVAArgument(A->getType(), IRB, BaseOffset);
+ OverflowOffset += AlignedSize;
+ if (OverflowOffset > kParamTLSSize) {
+ // We have no space to copy shadow there.
+ CleanUnusedTLS(IRB, Base, BaseOffset);
+ continue;
+ }
break;
}
// Count Gp/Vr fixed arguments to their respective offsets, but don't
// bother to actually store a shadow.
if (IsFixed)
continue;
- if (!Base)
- continue;
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
Constant *OverflowSize =
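A rough standalone model of the overflow-area bookkeeping in the new AArch64 hunk may help read it (not from the patch; all constants are assumptions for illustration: kParamTLSSize is 800 in MemorySanitizer.cpp, the stack-area shadow is taken to start after 64-byte GR and 128-byte VR shadow areas, and each argument's shadow is assumed to be 64 bytes, as for the [4 x fp128] values in the test below):

#include <cstdint>
#include <cstdio>

constexpr unsigned kParamTLSSize = 800;            // size of __msan_va_arg_tls
constexpr unsigned kAArch64VAEndOffset = 64 + 128; // assumed GR + VR shadow areas

int main() {
  unsigned OverflowOffset = kAArch64VAEndOffset;
  for (int ArgNo = 0; ArgNo < 20; ++ArgNo) {
    uint64_t AlignedSize = 64; // assumed alignTo(ArgSize, 8) for a [4 x fp128]
    unsigned BaseOffset = OverflowOffset;
    OverflowOffset += AlignedSize;
    if (OverflowOffset > kParamTLSSize) {
      // No room for a full shadow copy; zero whatever tail is still inside
      // __msan_va_arg_tls so the later copy to the backup never reads stale bytes.
      if (BaseOffset < kParamTLSSize)
        std::printf("arg %2d: memset %u bytes at offset %u\n", ArgNo,
                    kParamTLSSize - BaseOffset, BaseOffset);
      continue;
    }
    std::printf("arg %2d: store 64-byte shadow at offset %u\n", ArgNo, BaseOffset);
  }
  return 0;
}

Run as written, the model stores a full shadow for every argument that fits (offsets 192 through 704), emits a single tail memset for the first one that does not, and silently skips the rest.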
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
index 66f4d61f444f34e..ff9d4eea1596bc4 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
@@ -1859,6 +1859,7 @@ define linkonce_odr dso_local void @_Z4test2I11LongDouble4EvT_([4 x fp128] align
; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8
; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8
; CHECK-NEXT: store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8
+; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 768) to ptr), i8 0, i32 32, i1 false)
; CHECK-NEXT: store i64 1280, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void ([4 x fp128], i32, ...) @_Z5test2I11LongDouble4EvT_iz([4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], i32 noundef 20, [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]])
; CHECK-NEXT: ret void
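The added llvm.memset line is that cleanup in action: the last full 64-byte shadow store lands at offset 704 (704 + 64 = 768 still fits in the 800-byte __msan_va_arg_tls), the next argument's shadow would start at offset 768 where only 800 - 768 = 32 bytes remain, and that 32-byte tail is exactly what gets zeroed; arguments whose shadow would start beyond the buffer are skipped entirely.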