[llvm] bf20488 - [msan] Change logic of ClInstrumentationWithCallThreshold

Vitaly Buka via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 14 14:58:28 PDT 2022


Author: Vitaly Buka
Date: 2022-09-14T14:58:12-07:00
New Revision: bf204881b6376c57aa56668612321658c4606e38

URL: https://github.com/llvm/llvm-project/commit/bf204881b6376c57aa56668612321658c4606e38
DIFF: https://github.com/llvm/llvm-project/commit/bf204881b6376c57aa56668612321658c4606e38.diff

LOG: [msan] Change logic of ClInstrumentationWithCallThreshold

According to the logs, ClInstrumentationWithCallThreshold is a
workaround for a slow backend when a function contains a large number
of basic blocks. I can't reproduce that slowdown, but I do see a
significant one after enabling ClCheckConstantShadow. Without
ClInstrumentationWithCallThreshold the compiler is able to eliminate
many of those branches.

So maybe we should drop ClInstrumentationWithCallThreshold completely.

For now I just change the logic to ignore constant shadows, so that
they do not trigger the callback fallback too early.
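
In effect, the threshold decision moves from a single up-front
computation over the whole function to a per-shadow query that skips
constants. A condensed before/after sketch, paraphrasing the hunks
below rather than quoting the full patch:

  // Before: one aggregate decision for the entire function, counting
  // every pending check and store, constant shadows included.
  bool InstrumentWithCalls =
      ClInstrumentationWithCallThreshold >= 0 &&
      InstrumentationList.size() + StoreList.size() >
          (unsigned)ClInstrumentationWithCallThreshold;

  // After: queried once per shadow value; constant shadows are skipped
  // because follow-up passes are likely to eliminate them anyway.
  bool instrumentWithCalls(Value *V) {
    if (isa<Constant>(V))
      return false;
    ++SplittableBlocksCount;
    return ClInstrumentationWithCallThreshold >= 0 &&
           SplittableBlocksCount > ClInstrumentationWithCallThreshold;
  }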

Reviewed By: kstoimenov

Differential Revision: https://reviews.llvm.org/D133880

Added: 
    

Modified: 
    llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
    llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 977a2346a3724..f75d5a1a2a09f 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1117,6 +1117,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   SmallSetVector<AllocaInst *, 16> AllocaSet;
   SmallVector<std::pair<IntrinsicInst *, AllocaInst *>, 16> LifetimeStartList;
   SmallVector<StoreInst *, 16> StoreList;
+  int64_t SplittableBlocksCount = 0;
 
   MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
                          const TargetLibraryInfo &TLI)
@@ -1148,6 +1149,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                << F.getName() << "'\n");
   }
 
+  bool instrumentWithCalls(Value *V) {
+    // Constants likely will be eliminated by follow-up passes.
+    if (isa<Constant>(V))
+      return false;
+
+    ++SplittableBlocksCount;
+    return ClInstrumentationWithCallThreshold >= 0 &&
+           SplittableBlocksCount > ClInstrumentationWithCallThreshold;
+  }
+
   bool isInPrologue(Instruction &I) {
     return I.getParent() == FnPrologueEnd->getParent() &&
            (&I == FnPrologueEnd || I.comesBefore(FnPrologueEnd));
@@ -1206,7 +1217,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   }
 
   void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
-                   Value *OriginPtr, Align Alignment, bool AsCall) {
+                   Value *OriginPtr, Align Alignment) {
     const DataLayout &DL = F.getParent()->getDataLayout();
     const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
     unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
@@ -1228,7 +1239,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
     unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
-    if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
+    if (instrumentWithCalls(ConvertedShadow) &&
+        SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
       FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
       Value *ConvertedShadow2 =
           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
@@ -1247,7 +1259,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
   }
 
-  void materializeStores(bool InstrumentWithCalls) {
+  void materializeStores() {
     for (StoreInst *SI : StoreList) {
       IRBuilder<> IRB(SI);
       Value *Val = SI->getValueOperand();
@@ -1269,7 +1281,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
       if (MS.TrackOrigins && !SI->isAtomic())
         storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
-                    OriginAlignment, InstrumentWithCalls);
+                    OriginAlignment);
     }
   }
 
@@ -1319,11 +1331,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   }
 
   void materializeOneCheck(IRBuilder<> &IRB, Value *ConvertedShadow,
-                           Value *Origin, bool AsCall) {
+                           Value *Origin) {
     const DataLayout &DL = F.getParent()->getDataLayout();
     unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
     unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
-    if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
+    if (instrumentWithCalls(ConvertedShadow) &&
+        SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
       FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
       Value *ConvertedShadow2 =
           IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
@@ -1345,12 +1358,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   }
 
   void materializeInstructionChecks(
-      bool InstrumentWithCalls,
       ArrayRef<ShadowOriginAndInsertPoint> InstructionChecks) {
     const DataLayout &DL = F.getParent()->getDataLayout();
     // Disable combining in some cases. TrackOrigins checks each shadow to pick
-    // correct origin. InstrumentWithCalls expects to reduce shadow using API.
-    bool Combine = !InstrumentWithCalls && !MS.TrackOrigins;
+    // correct origin.
+    bool Combine = !MS.TrackOrigins;
     Instruction *Instruction = InstructionChecks.front().OrigIns;
     Value *Shadow = nullptr;
     for (const auto &ShadowData : InstructionChecks) {
@@ -1377,9 +1389,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         // Fallback to runtime check, which still can be optimized out later.
       }
 
+      // Work around to keep existing tests.
+      // FIXME: update tests and remove.
+      if (ClInstrumentationWithCallThreshold >= 0 &&
+          SplittableBlocksCount >= ClInstrumentationWithCallThreshold) {
+        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
+        continue;
+      }
+
       if (!Combine) {
-        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin,
-                            InstrumentWithCalls);
+        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
         continue;
       }
 
@@ -1390,11 +1409,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     if (Shadow) {
       assert(Combine);
       IRBuilder<> IRB(Instruction);
-      materializeOneCheck(IRB, Shadow, nullptr, false);
+      materializeOneCheck(IRB, Shadow, nullptr);
     }
   }
 
-  void materializeChecks(bool InstrumentWithCalls) {
+  void materializeChecks() {
     llvm::stable_sort(InstrumentationList,
                       [](const ShadowOriginAndInsertPoint &L,
                          const ShadowOriginAndInsertPoint &R) {
@@ -1409,8 +1428,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                          return L != R.OrigIns;
                        });
       // Process all checks of instruction at once.
-      materializeInstructionChecks(InstrumentWithCalls,
-                                   ArrayRef<ShadowOriginAndInsertPoint>(I, J));
+      materializeInstructionChecks(ArrayRef<ShadowOriginAndInsertPoint>(I, J));
       I = J;
     }
 
@@ -1474,16 +1492,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     for (AllocaInst *AI : AllocaSet)
       instrumentAlloca(*AI);
 
-    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
-                               InstrumentationList.size() + StoreList.size() >
-                                   (unsigned)ClInstrumentationWithCallThreshold;
-
     // Insert shadow value checks.
-    materializeChecks(InstrumentWithCalls);
+    materializeChecks();
 
     // Delayed instrumentation of StoreInst.
     // This may not add new address checks.
-    materializeStores(InstrumentWithCalls);
+    materializeStores();
 
     return true;
   }

diff --git a/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll b/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
index f3456ae1ac626..2461a3e832614 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
@@ -84,3 +84,11 @@ define <4 x i32> @test65(<4 x i32> %vec, i65 %idx, i32 %x) sanitize_memory {
 ; CHECK:         icmp ne i65 %{{.*}}, 0
 ; CHECK-NOT:     call void @__msan_maybe_warning_
 ; CHECK:         ret <4 x i32>
+
+define <4 x i32> @testUndef(<4 x i32> %vec, i32 %x) sanitize_memory {
+  %vec1 = insertelement <4 x i32> %vec, i32 undef, i32 undef
+  ret <4 x i32> %vec1
+}
+; CHECK-LABEL: @testUndef(
+; CHECK-NOT:     call void @__msan_maybe_warning_
+; CHECK:         ret <4 x i32>
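
The new testUndef case exercises exactly this: its shadow is a
constant (the operands are undef), so it no longer counts toward the
threshold. The test relies on the file's existing RUN line, which is
not shown in this hunk; it presumably resembles the following sketch
(an assumption, not quoted from the file):

  ; Assumed RUN line; the actual one lives in the unmodified part of the file.
  ; RUN: opt < %s -S -passes=msan -msan-instrumentation-with-call-threshold=0 | FileCheck %s

With a threshold of 0, non-constant shadows of supported sizes go
through the __msan_maybe_warning_* callbacks, so the CHECK-NOT above
demonstrates that the constant shadow was ignored rather than pushed
into the callback path.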
