[llvm] e0f7ef4 - [msan] Fix handling of ParamTLS overflow.

Evgenii Stepanov via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 4 13:52:37 PDT 2023


Author: Evgenii Stepanov
Date: 2023-04-04T13:52:09-07:00
New Revision: e0f7ef4b9ccf906f5382578a7ac0a0ba8d6d4f2b

URL: https://github.com/llvm/llvm-project/commit/e0f7ef4b9ccf906f5382578a7ac0a0ba8d6d4f2b
DIFF: https://github.com/llvm/llvm-project/commit/e0f7ef4b9ccf906f5382578a7ac0a0ba8d6d4f2b.diff

LOG: [msan] Fix handling of ParamTLS overflow.

Ironically, MSan copies uninitialized data off the stack into
VAArgTLSCopy in the callee-side handling of va_start. Clamp the copy
size to the actual length of the source TLS buffer (kParamTLSSize), and
zero-initialize the remainder of the copy.

Differential Revision: https://reviews.llvm.org/D146858
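
For readers skimming the diff below: every VarArg*Helper gets the same
clamp-and-zero pattern. The backup alloca is zero-filled first, the copy
length is clamped with llvm.umin against kParamTLSSize, and only that many
bytes are copied from va_arg TLS. A minimal C++ sketch of the idea follows
(illustrative only, not MSan's actual IRBuilder or runtime code; the name
backupVAArgTLS is made up for this sketch, and kParamTLSSize = 800 matches
the constant the updated tests check):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    constexpr size_t kParamTLSSize = 800; // fixed size of __msan_va_arg_tls

    // CopySize can exceed kParamTLSSize when a call passes more vararg
    // shadow than the TLS buffer holds; the old code memcpy'd CopySize
    // bytes anyway, reading past the end of the source buffer.
    void backupVAArgTLS(uint8_t *Copy, size_t CopySize,
                        const uint8_t *VAArgTLS) {
      // Zero the whole backup so any tail beyond the TLS buffer is defined.
      std::memset(Copy, 0, CopySize);
      // Clamp the copy so we never read past the end of the TLS buffer.
      size_t SrcSize = std::min(CopySize, kParamTLSSize);
      std::memcpy(Copy, VAArgTLS, SrcSize);
    }

The IR the pass actually emits is the memset / llvm.umin / memcpy sequence
visible in the hunks below.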

Added: 
    

Modified: 
    llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
    llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
    llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
    llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
    llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 953ce72c1cec9..93667c9482921 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -4609,8 +4609,8 @@ struct VarArgAMD64Helper : public VarArgHelper {
   Function &F;
   MemorySanitizer &MS;
   MemorySanitizerVisitor &MSV;
-  Value *VAArgTLSCopy = nullptr;
-  Value *VAArgTLSOriginCopy = nullptr;
+  AllocaInst *VAArgTLSCopy = nullptr;
+  AllocaInst *VAArgTLSOriginCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
   SmallVector<CallInst *, 16> VAStartInstrumentationList;
@@ -4806,11 +4806,20 @@ struct VarArgAMD64Helper : public VarArgHelper {
       Value *CopySize = IRB.CreateAdd(
           ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
+      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
+      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
+                       CopySize, kShadowTLSAlignment, false);
+
+      Value *SrcSize = IRB.CreateBinaryIntrinsic(
+          Intrinsic::umin, CopySize,
+          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
+      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
+                       kShadowTLSAlignment, SrcSize);
       if (MS.TrackOrigins) {
         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-        IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
-                         Align(8), CopySize);
+        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
+        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
+                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
       }
     }
 
@@ -4868,7 +4877,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
   Function &F;
   MemorySanitizer &MS;
   MemorySanitizerVisitor &MSV;
-  Value *VAArgTLSCopy = nullptr;
+  AllocaInst *VAArgTLSCopy = nullptr;
   Value *VAArgSize = nullptr;
 
   SmallVector<CallInst *, 16> VAStartInstrumentationList;
@@ -4953,7 +4962,15 @@ struct VarArgMIPS64Helper : public VarArgHelper {
       // If there is a va_start in this function, make a backup copy of
       // va_arg_tls somewhere in the function entry block.
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
+      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
+      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
+                       CopySize, kShadowTLSAlignment, false);
+
+      Value *SrcSize = IRB.CreateBinaryIntrinsic(
+          Intrinsic::umin, CopySize,
+          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
+      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
+                       kShadowTLSAlignment, SrcSize);
     }
 
     // Instrument va_start.
@@ -4995,7 +5012,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
   Function &F;
   MemorySanitizer &MS;
   MemorySanitizerVisitor &MSV;
-  Value *VAArgTLSCopy = nullptr;
+  AllocaInst *VAArgTLSCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
   SmallVector<CallInst *, 16> VAStartInstrumentationList;
@@ -5139,7 +5156,15 @@ struct VarArgAArch64Helper : public VarArgHelper {
       Value *CopySize = IRB.CreateAdd(
           ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
+      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
+      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
+                       CopySize, kShadowTLSAlignment, false);
+
+      Value *SrcSize = IRB.CreateBinaryIntrinsic(
+          Intrinsic::umin, CopySize,
+          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
+      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
+                       kShadowTLSAlignment, SrcSize);
     }
 
     Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
@@ -5239,7 +5264,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
   Function &F;
   MemorySanitizer &MS;
   MemorySanitizerVisitor &MSV;
-  Value *VAArgTLSCopy = nullptr;
+  AllocaInst *VAArgTLSCopy = nullptr;
   Value *VAArgSize = nullptr;
 
   SmallVector<CallInst *, 16> VAStartInstrumentationList;
@@ -5382,8 +5407,17 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
     if (!VAStartInstrumentationList.empty()) {
       // If there is a va_start in this function, make a backup copy of
       // va_arg_tls somewhere in the function entry block.
+
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
+      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
+      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
+                       CopySize, kShadowTLSAlignment, false);
+
+      Value *SrcSize = IRB.CreateBinaryIntrinsic(
+          Intrinsic::umin, CopySize,
+          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
+      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
+                       kShadowTLSAlignment, SrcSize);
     }
 
     // Instrument va_start.
@@ -5426,8 +5460,8 @@ struct VarArgSystemZHelper : public VarArgHelper {
   MemorySanitizer &MS;
   MemorySanitizerVisitor &MSV;
   bool IsSoftFloatABI;
-  Value *VAArgTLSCopy = nullptr;
-  Value *VAArgTLSOriginCopy = nullptr;
+  AllocaInst *VAArgTLSCopy = nullptr;
+  AllocaInst *VAArgTLSOriginCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
   SmallVector<CallInst *, 16> VAStartInstrumentationList;
@@ -5696,11 +5730,20 @@ struct VarArgSystemZHelper : public VarArgHelper {
           IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
                         VAArgOverflowSize);
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
+      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
+      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
+                       CopySize, kShadowTLSAlignment, false);
+
+      Value *SrcSize = IRB.CreateBinaryIntrinsic(
+          Intrinsic::umin, CopySize,
+          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
+      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
+                       kShadowTLSAlignment, SrcSize);
       if (MS.TrackOrigins) {
         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-        IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
-                         Align(8), CopySize);
+        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
+        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
+                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
       }
     }
 

diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
index 7019c645d3726..8c23d95422426 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
@@ -19,7 +19,10 @@ define i32 @foo(i32 %guard, ...) {
 ; CHECK: [[B:%.*]] = add i64 0, [[A]]
 ; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
 
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[B]], i1 false)
+; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[B]], i1 false)
+
+; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[B]], i64 800)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
 
 declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 declare void @llvm.va_start(ptr) #2

diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
index be7ccf00221ef..17f4b826be0be 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
@@ -19,7 +19,10 @@ define i32 @foo(i32 %guard, ...) {
 ; CHECK: [[B:%.*]] = add i64 0, [[A]]
 ; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
 
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[B]], i1 false)
+; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[B]], i1 false)
+
+; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[B]], i64 800)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
 
 declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 declare void @llvm.va_start(ptr) #2

diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
index 5e9003511a638..db09c5a477186 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
@@ -19,7 +19,10 @@ define i32 @foo(i32 %guard, ...) {
 ; CHECK: [[B:%.*]] = add i64 0, [[A]]
 ; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
 
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[B]], i1 false)
+; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[B]], i1 false)
+
+; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[B]], i64 800)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
 
 declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 declare void @llvm.va_start(ptr) #2

diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
index 70c76a81e1cd0..63e11dc7cadd0 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
@@ -19,7 +19,10 @@ define i32 @foo(i32 %guard, ...) {
 ; CHECK: [[B:%.*]] = add i64 0, [[A]]
 ; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
 
-; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[B]], i1 false)
+; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[B]], i1 false)
+
+; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[B]], i64 800)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
 
 declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 declare void @llvm.va_start(ptr) #2

diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
index 3b4ef9245bb08..21f3311a57efa 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
@@ -501,10 +501,12 @@ define void @VAStart(i32 %x, ...) sanitize_memory {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]], !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 1, !dbg [[DBG1]]
-; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP1]], i1 false), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = alloca i8, i64 [[TMP1]], align 1, !dbg [[DBG1]]
-; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 @__msan_va_arg_origin_tls, i64 [[TMP1]], i1 false), !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false), !dbg [[DBG1]]
+; CHECK-NEXT:    [[SRCSZ:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800), !dbg [[DBG1]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[SRCSZ]], i1 false), !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = alloca i8, i64 [[TMP1]], align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 @__msan_va_arg_origin_tls, i64 [[SRCSZ]], i1 false), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]

diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
index f4fbf5fa4b118..309921bfc352b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
@@ -341,9 +341,11 @@ attributes #0 = { "target-features"="+fxsr,+x87,-sse" }
 ; Register save area is 48 bytes for non-SSE builds.
 ; CHECK: [[SIZE:%[0-9]+]] = add i64 48, [[OSIZE]]
 ; CHECK: [[SHADOWS:%[0-9]+]] = alloca i8, i64 [[SIZE]]
-; CHECK: call void @llvm.memcpy{{.*}}(ptr align 8 [[SHADOWS]], ptr align 8 [[VA_ARG_SHADOW]], i64 [[SIZE]]
+; CHECK: call void @llvm.memset{{.*}}(ptr align 8 [[SHADOWS]], i8 0, i64 [[SIZE]], i1 false)
+; CHECK: [[COPYSZ:%[0-9]+]] = call i64 @llvm.umin.i64(i64 [[SIZE]], i64 800)
+; CHECK: call void @llvm.memcpy{{.*}}(ptr align 8 [[SHADOWS]], ptr align 8 [[VA_ARG_SHADOW]], i64 [[COPYSZ]]
 ; CHECK: [[ORIGINS:%[0-9]+]] = alloca i8, i64 [[SIZE]]
-; CHECK: call void @llvm.memcpy{{.*}}(ptr align 8 [[ORIGINS]], ptr align 8 [[VA_ARG_ORIGIN]], i64 [[SIZE]]
+; CHECK: call void @llvm.memcpy{{.*}}(ptr align 8 [[ORIGINS]], ptr align 8 [[VA_ARG_ORIGIN]], i64 [[COPYSZ]]
 ; CHECK: call i32 @VAListFn
 
 ; Function Attrs: nounwind uwtable

