[compiler-rt] [llvm] [Asan] Add "funclet" OpBundle to generated runtime calls if required by EH personality (PR #82533)

via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 1 11:20:44 PST 2024


https://github.com/sylvain-audi updated https://github.com/llvm/llvm-project/pull/82533

>From 1bd60452d76dcd8efe15365d62c317d5ac880c47 Mon Sep 17 00:00:00 2001
From: Sylvain Audi <sylvain.audi at ubisoft.com>
Date: Wed, 21 Feb 2024 15:26:23 -0500
Subject: [PATCH 1/3] [Asan] Add "funclet" OpBundle to Asan calls, when needed
 by EH Personality

Previously, when ASan instrumentation introduced runtime calls into EH pads, the appropriate funclet tokens expected by WinEHPrepare were missing.
The BB was then seen as invalid and discarded by WinEHPrepare, leading to invalid code that crashes.

Also fixed localescape test, switching its EH personality to match code without funclets.

This PR is based on the Phabricator patch https://reviews.llvm.org/D143108

Fixes https://github.com/llvm/llvm-project/issues/64990
---
 .../asan/TestCases/Windows/issue64990.cpp     |  18 +
 .../Instrumentation/AddressSanitizer.cpp      | 232 +++++---
 .../AddressSanitizer/asan-funclet.ll          | 527 +++++++++++-------
 .../AddressSanitizer/localescape.ll           |   4 +-
 4 files changed, 505 insertions(+), 276 deletions(-)
 create mode 100644 compiler-rt/test/asan/TestCases/Windows/issue64990.cpp

diff --git a/compiler-rt/test/asan/TestCases/Windows/issue64990.cpp b/compiler-rt/test/asan/TestCases/Windows/issue64990.cpp
new file mode 100644
index 00000000000000..a5a46b5a81ddca
--- /dev/null
+++ b/compiler-rt/test/asan/TestCases/Windows/issue64990.cpp
@@ -0,0 +1,18 @@
+// Repro for the issue #64990: Asan with Windows EH generates __asan_xxx runtime calls without required funclet tokens
+// RUN: %clang_cl_asan %Od %s -EHsc %Fe%t
+// RUN: not %run %t 2>&1 | FileCheck %s
+
+char buff1[6] = "hello";
+char buff2[6] = "hello";
+
+int main(int argc, char **argv) {
+  try {
+    throw 1;
+  } catch (...) {
+    // Make asan generate call to __asan_memcpy inside the EH pad.
+    __builtin_memcpy(buff1, buff2 + 3, 6);
+  }
+  return 0;
+}
+
+// CHECK: SUMMARY: AddressSanitizer: global-buffer-overflow {{.*}} in __asan_memcpy
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 5d5c4ea57ed56c..f7aef729dd348d 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -43,6 +43,7 @@
 #include "llvm/IR/DebugInfoMetadata.h"
 #include "llvm/IR/DebugLoc.h"
 #include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/EHPersonalities.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/GlobalAlias.h"
 #include "llvm/IR/GlobalValue.h"
@@ -642,6 +643,70 @@ static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
 }
 
 namespace {
+/// Helper RAII class to keep track of the inserted asan runtime calls during a
+/// pass on a single Function. Upon end of scope, detects and applies the
+/// required funclet OpBundle.
+class RuntimeCallInserter {
+  Function *OwnerFn = nullptr;
+  bool TrackInsertedCalls = false;
+  std::vector<CallInst *> InsertedCalls;
+
+public:
+  RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
+    if (Fn.hasPersonalityFn()) {
+      auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
+      if (isScopedEHPersonality(Personality))
+        TrackInsertedCalls = true;
+    }
+  }
+
+  ~RuntimeCallInserter() {
+    if (!TrackInsertedCalls || InsertedCalls.empty())
+      return;
+
+    DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
+    for (CallInst *CI : InsertedCalls) {
+      BasicBlock *BB = CI->getParent();
+      assert(BB && "Instruction doesn't belong to a BasicBlock");
+      assert(BB->getParent() == OwnerFn &&
+             "Instruction doesn't belong to the expected Function!");
+
+      ColorVector &Colors = BlockColors[BB];
+      // funclet opbundles are only valid in monochromatic BBs.
+      // Note that unreachable BBs are seen as colorless by colorEHFunclets()
+      // and will be DCE'ed later.
+      if (Colors.size() != 1) {
+        OwnerFn->getContext().emitError(
+            "Instruction's BasicBlock is not monochromatic");
+        continue;
+      }
+
+      BasicBlock *Color = Colors.front();
+      Instruction *EHPad = Color->getFirstNonPHI();
+
+      if (EHPad && EHPad->isEHPad()) {
+        // Replace CI with a clone with an added funclet OperandBundle
+        OperandBundleDef OB("funclet", EHPad);
+        auto *NewCall =
+            CallBase::addOperandBundle(CI, LLVMContext::OB_funclet, OB, CI);
+        NewCall->copyMetadata(*CI);
+        CI->replaceAllUsesWith(NewCall);
+        CI->eraseFromParent();
+      }
+    }
+  }
+
+  CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
+                              ArrayRef<Value *> Args = {},
+                              const Twine &Name = "") {
+    assert(IRB.GetInsertBlock()->getParent() == OwnerFn);
+
+    CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
+    if (TrackInsertedCalls)
+      InsertedCalls.push_back(Inst);
+    return Inst;
+  }
+};
 
 /// AddressSanitizer: instrument the code in module to find memory bugs.
 struct AddressSanitizer {
@@ -691,12 +756,14 @@ struct AddressSanitizer {
 
   void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                      InterestingMemoryOperand &O, bool UseCalls,
-                     const DataLayout &DL);
-  void instrumentPointerComparisonOrSubtraction(Instruction *I);
+                     const DataLayout &DL, RuntimeCallInserter &RTCI);
+  void instrumentPointerComparisonOrSubtraction(Instruction *I,
+                                                RuntimeCallInserter &RTCI);
   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                          Value *Addr, MaybeAlign Alignment,
                          uint32_t TypeStoreSize, bool IsWrite,
-                         Value *SizeArgument, bool UseCalls, uint32_t Exp);
+                         Value *SizeArgument, bool UseCalls, uint32_t Exp,
+                         RuntimeCallInserter &RTCI);
   Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                        Instruction *InsertBefore, Value *Addr,
                                        uint32_t TypeStoreSize, bool IsWrite,
@@ -707,20 +774,22 @@ struct AddressSanitizer {
                                         Instruction *InsertBefore, Value *Addr,
                                         TypeSize TypeStoreSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls,
-                                        uint32_t Exp);
+                                        uint32_t Exp,
+                                        RuntimeCallInserter &RTCI);
   void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
                                    Type *IntptrTy, Value *Mask, Value *EVL,
                                    Value *Stride, Instruction *I, Value *Addr,
                                    MaybeAlign Alignment, unsigned Granularity,
                                    Type *OpType, bool IsWrite,
                                    Value *SizeArgument, bool UseCalls,
-                                   uint32_t Exp);
+                                   uint32_t Exp, RuntimeCallInserter &RTCI);
   Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                            Value *ShadowValue, uint32_t TypeStoreSize);
   Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                  bool IsWrite, size_t AccessSizeIndex,
-                                 Value *SizeArgument, uint32_t Exp);
-  void instrumentMemIntrinsic(MemIntrinsic *MI);
+                                 Value *SizeArgument, uint32_t Exp,
+                                 RuntimeCallInserter &RTCI);
+  void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
   bool suppressInstrumentationSiteForDebug(int &Instrumented);
   bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
@@ -912,6 +981,7 @@ class ModuleAddressSanitizer {
 struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
   Function &F;
   AddressSanitizer &ASan;
+  RuntimeCallInserter &RTCI;
   DIBuilder DIB;
   LLVMContext *C;
   Type *IntptrTy;
@@ -948,10 +1018,12 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
   bool HasReturnsTwiceCall = false;
   bool PoisonStack;
 
-  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
-      : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
-        C(ASan.C), IntptrTy(ASan.IntptrTy),
-        IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
+  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
+                        RuntimeCallInserter &RTCI)
+      : F(F), ASan(ASan), RTCI(RTCI),
+        DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
+        IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
+        Mapping(ASan.Mapping),
         PoisonStack(ClStack &&
                     !Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {}
 
@@ -1034,8 +1106,8 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
                                      DynamicAreaOffset);
     }
 
-    IRB.CreateCall(
-        AsanAllocasUnpoisonFunc,
+    RTCI.createRuntimeCall(
+        IRB, AsanAllocasUnpoisonFunc,
         {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
   }
 
@@ -1251,16 +1323,18 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
 }
 
 // Instrument memset/memmove/memcpy
-void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
+void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
+                                              RuntimeCallInserter &RTCI) {
   InstrumentationIRBuilder IRB(MI);
   if (isa<MemTransferInst>(MI)) {
-    IRB.CreateCall(isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
-                   {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
-                    IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
-                    IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
+  RTCI.createRuntimeCall(
+      IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
+      {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
+       IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
+       IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
   } else if (isa<MemSetInst>(MI)) {
-    IRB.CreateCall(
-        AsanMemset,
+    RTCI.createRuntimeCall(
+        IRB, AsanMemset,
         {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
          IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
          IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
@@ -1498,7 +1572,7 @@ bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
 }
 
 void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
-    Instruction *I) {
+    Instruction *I, RuntimeCallInserter &RTCI) {
   IRBuilder<> IRB(I);
   FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
   Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
@@ -1506,7 +1580,7 @@ void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
     if (i->getType()->isPointerTy())
       i = IRB.CreatePointerCast(i, IntptrTy);
   }
-  IRB.CreateCall(F, Param);
+  RTCI.createRuntimeCall(IRB, F, Param);
 }
 
 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
@@ -1514,7 +1588,7 @@ static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
                                 MaybeAlign Alignment, unsigned Granularity,
                                 TypeSize TypeStoreSize, bool IsWrite,
                                 Value *SizeArgument, bool UseCalls,
-                                uint32_t Exp) {
+                                uint32_t Exp, RuntimeCallInserter &RTCI) {
   // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
   // if the data is properly aligned.
   if (!TypeStoreSize.isScalable()) {
@@ -1529,18 +1603,19 @@ static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
           *Alignment >= FixedSize / 8)
         return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
                                        FixedSize, IsWrite, nullptr, UseCalls,
-                                       Exp);
+                                       Exp, RTCI);
     }
   }
   Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
-                                         IsWrite, nullptr, UseCalls, Exp);
+                                         IsWrite, nullptr, UseCalls, Exp, RTCI);
 }
 
 void AddressSanitizer::instrumentMaskedLoadOrStore(
     AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
     Value *EVL, Value *Stride, Instruction *I, Value *Addr,
     MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
-    Value *SizeArgument, bool UseCalls, uint32_t Exp) {
+    Value *SizeArgument, bool UseCalls, uint32_t Exp,
+    RuntimeCallInserter &RTCI) {
   auto *VTy = cast<VectorType>(OpType);
   TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
   auto Zero = ConstantInt::get(IntptrTy, 0);
@@ -1595,15 +1670,16 @@ void AddressSanitizer::instrumentMaskedLoadOrStore(
     } else {
       InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
     }
-    doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(),
-                        InstrumentedAddress, Alignment, Granularity,
-                        ElemTypeSize, IsWrite, SizeArgument, UseCalls, Exp);
+    doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
+                        Alignment, Granularity, ElemTypeSize, IsWrite,
+                        SizeArgument, UseCalls, Exp, RTCI);
   });
 }
 
 void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                                      InterestingMemoryOperand &O, bool UseCalls,
-                                     const DataLayout &DL) {
+                                     const DataLayout &DL,
+                                     RuntimeCallInserter &RTCI) {
   Value *Addr = O.getPtr();
 
   // Optimization experiments.
@@ -1649,11 +1725,11 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
     instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
                                 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
                                 Granularity, O.OpType, O.IsWrite, nullptr,
-                                UseCalls, Exp);
+                                UseCalls, Exp, RTCI);
   } else {
     doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
-                        Granularity, O.TypeStoreSize, O.IsWrite, nullptr, UseCalls,
-                        Exp);
+                        Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
+                        UseCalls, Exp, RTCI);
   }
 }
 
@@ -1661,24 +1737,25 @@ Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
                                                  Value *Addr, bool IsWrite,
                                                  size_t AccessSizeIndex,
                                                  Value *SizeArgument,
-                                                 uint32_t Exp) {
+                                                 uint32_t Exp,
+                                                 RuntimeCallInserter &RTCI) {
   InstrumentationIRBuilder IRB(InsertBefore);
   Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
   CallInst *Call = nullptr;
   if (SizeArgument) {
     if (Exp == 0)
-      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
-                            {Addr, SizeArgument});
+      Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
+                                    {Addr, SizeArgument});
     else
-      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
-                            {Addr, SizeArgument, ExpVal});
+      Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
+                                    {Addr, SizeArgument, ExpVal});
   } else {
     if (Exp == 0)
-      Call =
-          IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
+      Call = RTCI.createRuntimeCall(
+          IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
     else
-      Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
-                            {Addr, ExpVal});
+      Call = RTCI.createRuntimeCall(
+          IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
   }
 
   Call->setCannotMerge();
@@ -1754,7 +1831,8 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                          MaybeAlign Alignment,
                                          uint32_t TypeStoreSize, bool IsWrite,
                                          Value *SizeArgument, bool UseCalls,
-                                         uint32_t Exp) {
+                                         uint32_t Exp,
+                                         RuntimeCallInserter &RTCI) {
   if (TargetTriple.isAMDGPU()) {
     InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
                                            TypeStoreSize, IsWrite, SizeArgument);
@@ -1779,11 +1857,12 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
   if (UseCalls) {
     if (Exp == 0)
-      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
-                     AddrLong);
+      RTCI.createRuntimeCall(
+          IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
     else
-      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
-                     {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
+      RTCI.createRuntimeCall(
+          IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
+          {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
     return;
   }
 
@@ -1830,8 +1909,8 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
     CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
   }
 
-  Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
-                                         AccessSizeIndex, SizeArgument, Exp);
+  Instruction *Crash = generateCrashCode(
+      CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
   if (OrigIns->getDebugLoc())
     Crash->setDebugLoc(OrigIns->getDebugLoc());
 }
@@ -1841,8 +1920,9 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
 // to report the actual access size.
 void AddressSanitizer::instrumentUnusualSizeOrAlignment(
-    Instruction *I, Instruction *InsertBefore, Value *Addr, TypeSize TypeStoreSize,
-    bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
+    Instruction *I, Instruction *InsertBefore, Value *Addr,
+    TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
+    uint32_t Exp, RuntimeCallInserter &RTCI) {
   InstrumentationIRBuilder IRB(InsertBefore);
   Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
   Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
@@ -1850,19 +1930,21 @@ void AddressSanitizer::instrumentUnusualSizeOrAlignment(
   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
   if (UseCalls) {
     if (Exp == 0)
-      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
-                     {AddrLong, Size});
+      RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
+                             {AddrLong, Size});
     else
-      IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
-                     {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
+      RTCI.createRuntimeCall(
+          IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
+          {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
   } else {
     Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
     Value *LastByte = IRB.CreateIntToPtr(
         IRB.CreateAdd(AddrLong, SizeMinusOne),
         Addr->getType());
-    instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp);
+    instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
+                      RTCI);
     instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
-                      Exp);
+                      Exp, RTCI);
   }
 }
 
@@ -2881,6 +2963,8 @@ bool AddressSanitizer::instrumentFunction(Function &F,
 
   FunctionStateRAII CleanupObj(this);
 
+  RuntimeCallInserter RTCI(F);
+
   FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
 
   // We can't instrument allocas used with llvm.localescape. Only static allocas
@@ -2963,27 +3047,27 @@ bool AddressSanitizer::instrumentFunction(Function &F,
   for (auto &Operand : OperandsToInstrument) {
     if (!suppressInstrumentationSiteForDebug(NumInstrumented))
       instrumentMop(ObjSizeVis, Operand, UseCalls,
-                    F.getParent()->getDataLayout());
+                    F.getParent()->getDataLayout(), RTCI);
     FunctionModified = true;
   }
   for (auto *Inst : IntrinToInstrument) {
     if (!suppressInstrumentationSiteForDebug(NumInstrumented))
-      instrumentMemIntrinsic(Inst);
+      instrumentMemIntrinsic(Inst, RTCI);
     FunctionModified = true;
   }
 
-  FunctionStackPoisoner FSP(F, *this);
+  FunctionStackPoisoner FSP(F, *this, RTCI);
   bool ChangedStack = FSP.runOnFunction();
 
   // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
   // See e.g. https://github.com/google/sanitizers/issues/37
   for (auto *CI : NoReturnCalls) {
     IRBuilder<> IRB(CI);
-    IRB.CreateCall(AsanHandleNoReturnFunc, {});
+    RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
   }
 
   for (auto *Inst : PointerComparisonsOrSubtracts) {
-    instrumentPointerComparisonOrSubtraction(Inst);
+    instrumentPointerComparisonOrSubtraction(Inst, RTCI);
     FunctionModified = true;
   }
 
@@ -3128,9 +3212,10 @@ void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
 
     if (j - i >= ASan.MaxInlinePoisoningSize) {
       copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
-      IRB.CreateCall(AsanSetShadowFunc[Val],
-                     {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
-                      ConstantInt::get(IntptrTy, j - i)});
+      RTCI.createRuntimeCall(
+          IRB, AsanSetShadowFunc[Val],
+          {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
+           ConstantInt::get(IntptrTy, j - i)});
       Done = j;
     }
   }
@@ -3417,8 +3502,8 @@ void FunctionStackPoisoner::processStaticAllocas() {
       StackMallocIdx = StackMallocSizeClass(LocalStackSize);
       assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
       Value *FakeStackValue =
-          IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
-                           ConstantInt::get(IntptrTy, LocalStackSize));
+          RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
+                                 ConstantInt::get(IntptrTy, LocalStackSize));
       IRB.SetInsertPoint(InsBefore);
       FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
                             ConstantInt::get(IntptrTy, 0));
@@ -3428,7 +3513,8 @@ void FunctionStackPoisoner::processStaticAllocas() {
       // void *LocalStackBase = (FakeStack) ? FakeStack :
       //                        alloca(LocalStackSize);
       StackMallocIdx = StackMallocSizeClass(LocalStackSize);
-      FakeStack = IRB.CreateCall(AsanStackMallocFunc[StackMallocIdx],
+      FakeStack =
+          RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
                                  ConstantInt::get(IntptrTy, LocalStackSize));
     }
     Value *NoFakeStack =
@@ -3563,8 +3649,8 @@ void FunctionStackPoisoner::processStaticAllocas() {
             IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
       } else {
         // For larger frames call __asan_stack_free_*.
-        IRBPoison.CreateCall(
-            AsanStackFreeFunc[StackMallocIdx],
+        RTCI.createRuntimeCall(
+            IRBPoison, AsanStackFreeFunc[StackMallocIdx],
             {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
       }
 
@@ -3585,8 +3671,8 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
   // For now just insert the call to ASan runtime.
   Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
   Value *SizeArg = ConstantInt::get(IntptrTy, Size);
-  IRB.CreateCall(
-      DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
+  RTCI.createRuntimeCall(
+      IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
       {AddrArg, SizeArg});
 }
 
@@ -3647,7 +3733,7 @@ void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
                     ConstantInt::get(IntptrTy, Alignment.value()));
 
   // Insert __asan_alloca_poison call for new created alloca.
-  IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
+  RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
 
   // Store the last alloca's address to DynamicAllocaLayout. We'll need this
   // for unpoisoning stuff.
diff --git a/llvm/test/Instrumentation/AddressSanitizer/asan-funclet.ll b/llvm/test/Instrumentation/AddressSanitizer/asan-funclet.ll
index f0a5c67365ab5f..6d40e2b7eaf9c5 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/asan-funclet.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/asan-funclet.ll
@@ -1,8 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
 
-; Warning! The output of this test is currently invalid.
-; It serves as a base for the bugfix patch to highlight the modified generated code.
-
 ; Test appropriate tagging of funclet for function calls generated by asan.
 ; RUN: opt -S -passes=asan,win-eh-prepare -asan-use-stack-safety=0 -asan-max-inline-poisoning-size=0 \
 ; RUN:   -asan-detect-invalid-pointer-cmp -asan-detect-invalid-pointer-sub -asan-use-after-scope < %s | FileCheck %s --check-prefixes=CHECK,CHECK-INLINE
@@ -53,137 +50,221 @@ define void @FuncletPersonality(ptr %ptrParam) sanitize_address personality ptr
 ; CHECK-INLINE-NEXT:    store i64 [[TMP12]], ptr [[ASAN_LOCAL_STACK_BASE]], align 8
 ; CHECK-INLINE-NEXT:    [[TMP13:%.*]] = add i64 [[TMP12]], 32
 ; CHECK-INLINE-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-INLINE-NEXT:    [[TMP15:%.*]] = add i64 [[TMP12]], 8528
+; CHECK-INLINE-NEXT:    [[TMP15:%.*]] = add i64 [[TMP12]], 8480
 ; CHECK-INLINE-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-INLINE-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP12]] to ptr
-; CHECK-INLINE-NEXT:    store i64 1102416563, ptr [[TMP17]], align 8
-; CHECK-INLINE-NEXT:    [[TMP18:%.*]] = add i64 [[TMP12]], 8
-; CHECK-INLINE-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-INLINE-NEXT:    store i64 ptrtoint (ptr @___asan_gen_ to i64), ptr [[TMP19]], align 8
-; CHECK-INLINE-NEXT:    [[TMP20:%.*]] = add i64 [[TMP12]], 16
-; CHECK-INLINE-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-; CHECK-INLINE-NEXT:    store i64 ptrtoint (ptr @FuncletPersonality to i64), ptr [[TMP21]], align 8
-; CHECK-INLINE-NEXT:    [[TMP22:%.*]] = lshr i64 [[TMP12]], 3
-; CHECK-INLINE-NEXT:    [[TMP23:%.*]] = add i64 [[TMP22]], [[TMP1]]
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f1(i64 [[TMP23]], i64 4)
-; CHECK-INLINE-NEXT:    [[TMP24:%.*]] = add i64 [[TMP23]], 1028
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP24]], i64 32)
-; CHECK-INLINE-NEXT:    [[TMP25:%.*]] = add i64 [[TMP23]], 1060
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP25]], i64 1)
-; CHECK-INLINE-NEXT:    [[TMP26:%.*]] = add i64 [[TMP23]], 1061
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP26]], i64 1)
-; CHECK-INLINE-NEXT:    [[TMP27:%.*]] = add i64 [[TMP23]], 1062
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP27]], i64 1)
-; CHECK-INLINE-NEXT:    [[TMP28:%.*]] = add i64 [[TMP23]], 1063
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP28]], i64 1)
-; CHECK-INLINE-NEXT:    [[TMP29:%.*]] = add i64 [[TMP23]], 1064
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP29]], i64 1)
-; CHECK-INLINE-NEXT:    [[TMP30:%.*]] = add i64 [[TMP23]], 1065
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP30]], i64 1)
-; CHECK-INLINE-NEXT:    [[TMP31:%.*]] = add i64 [[TMP23]], 1066
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f8(i64 [[TMP31]], i64 1)
-; CHECK-INLINE-NEXT:    [[TMP32:%.*]] = add i64 [[TMP23]], 1067
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f3(i64 [[TMP32]], i64 1)
-; CHECK-INLINE-NEXT:    [[TMP33:%.*]] = add i64 [[TMP23]], 1066
+; CHECK-INLINE-NEXT:    [[TMP17:%.*]] = add i64 [[TMP12]], 8496
+; CHECK-INLINE-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP19:%.*]] = add i64 [[TMP12]], 8512
+; CHECK-INLINE-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP21:%.*]] = add i64 [[TMP12]], 8528
+; CHECK-INLINE-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-INLINE-NEXT:    store i64 1102416563, ptr [[TMP23]], align 8
+; CHECK-INLINE-NEXT:    [[TMP24:%.*]] = add i64 [[TMP12]], 8
+; CHECK-INLINE-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-INLINE-NEXT:    store i64 ptrtoint (ptr @___asan_gen_ to i64), ptr [[TMP25]], align 8
+; CHECK-INLINE-NEXT:    [[TMP26:%.*]] = add i64 [[TMP12]], 16
+; CHECK-INLINE-NEXT:    [[TMP27:%.*]] = inttoptr i64 [[TMP26]] to ptr
+; CHECK-INLINE-NEXT:    store i64 ptrtoint (ptr @FuncletPersonality to i64), ptr [[TMP27]], align 8
+; CHECK-INLINE-NEXT:    [[TMP28:%.*]] = lshr i64 [[TMP12]], 3
+; CHECK-INLINE-NEXT:    [[TMP29:%.*]] = add i64 [[TMP28]], [[TMP1]]
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f1(i64 [[TMP29]], i64 4)
+; CHECK-INLINE-NEXT:    [[TMP30:%.*]] = add i64 [[TMP29]], 1028
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP30]], i64 32)
+; CHECK-INLINE-NEXT:    [[TMP31:%.*]] = add i64 [[TMP29]], 1060
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP31]], i64 1)
+; CHECK-INLINE-NEXT:    [[TMP32:%.*]] = add i64 [[TMP29]], 1061
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP32]], i64 1)
+; CHECK-INLINE-NEXT:    [[TMP33:%.*]] = add i64 [[TMP29]], 1062
 ; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP33]], i64 1)
-; CHECK-INLINE-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP16]])
-; CHECK-INLINE-NEXT:    [[TMP34:%.*]] = lshr i64 [[TMP15]], 3
-; CHECK-INLINE-NEXT:    [[TMP35:%.*]] = add i64 [[TMP34]], [[TMP1]]
-; CHECK-INLINE-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
-; CHECK-INLINE-NEXT:    [[TMP37:%.*]] = load i8, ptr [[TMP36]], align 1
-; CHECK-INLINE-NEXT:    [[TMP38:%.*]] = icmp ne i8 [[TMP37]], 0
-; CHECK-INLINE-NEXT:    br i1 [[TMP38]], label [[TMP39:%.*]], label [[TMP44:%.*]], !prof [[PROF0:![0-9]+]]
-; CHECK-INLINE:       39:
-; CHECK-INLINE-NEXT:    [[TMP40:%.*]] = and i64 [[TMP15]], 7
-; CHECK-INLINE-NEXT:    [[TMP41:%.*]] = trunc i64 [[TMP40]] to i8
-; CHECK-INLINE-NEXT:    [[TMP42:%.*]] = icmp sge i8 [[TMP41]], [[TMP37]]
-; CHECK-INLINE-NEXT:    br i1 [[TMP42]], label [[TMP43:%.*]], label [[TMP44]]
-; CHECK-INLINE:       43:
-; CHECK-INLINE-NEXT:    call void @__asan_report_store1(i64 [[TMP15]]) #[[ATTR8:[0-9]+]]
+; CHECK-INLINE-NEXT:    [[TMP34:%.*]] = add i64 [[TMP29]], 1063
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP34]], i64 1)
+; CHECK-INLINE-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], 1064
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP35]], i64 1)
+; CHECK-INLINE-NEXT:    [[TMP36:%.*]] = add i64 [[TMP29]], 1065
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP36]], i64 1)
+; CHECK-INLINE-NEXT:    [[TMP37:%.*]] = add i64 [[TMP29]], 1066
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f8(i64 [[TMP37]], i64 1)
+; CHECK-INLINE-NEXT:    [[TMP38:%.*]] = add i64 [[TMP29]], 1067
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f3(i64 [[TMP38]], i64 1)
+; CHECK-INLINE-NEXT:    [[TMP39:%.*]] = add i64 [[TMP29]], 1066
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP39]], i64 1)
+; CHECK-INLINE-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP22]])
+; CHECK-INLINE-NEXT:    [[TMP40:%.*]] = lshr i64 [[TMP21]], 3
+; CHECK-INLINE-NEXT:    [[TMP41:%.*]] = add i64 [[TMP40]], [[TMP1]]
+; CHECK-INLINE-NEXT:    [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP43:%.*]] = load i8, ptr [[TMP42]], align 1
+; CHECK-INLINE-NEXT:    [[TMP44:%.*]] = icmp ne i8 [[TMP43]], 0
+; CHECK-INLINE-NEXT:    br i1 [[TMP44]], label [[TMP45:%.*]], label [[TMP50:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK-INLINE:       45:
+; CHECK-INLINE-NEXT:    [[TMP46:%.*]] = and i64 [[TMP21]], 7
+; CHECK-INLINE-NEXT:    [[TMP47:%.*]] = trunc i64 [[TMP46]] to i8
+; CHECK-INLINE-NEXT:    [[TMP48:%.*]] = icmp sge i8 [[TMP47]], [[TMP43]]
+; CHECK-INLINE-NEXT:    br i1 [[TMP48]], label [[TMP49:%.*]], label [[TMP50]]
+; CHECK-INLINE:       49:
+; CHECK-INLINE-NEXT:    call void @__asan_report_store1(i64 [[TMP21]]) #[[ATTR8:[0-9]+]]
 ; CHECK-INLINE-NEXT:    unreachable
-; CHECK-INLINE:       44:
-; CHECK-INLINE-NEXT:    store volatile i8 0, ptr [[TMP16]], align 1
-; CHECK-INLINE-NEXT:    [[TMP45:%.*]] = add i64 [[TMP23]], 1066
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f8(i64 [[TMP45]], i64 1)
-; CHECK-INLINE-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP16]])
-; CHECK-INLINE-NEXT:    [[TMP46:%.*]] = alloca i8, i64 96, align 32
-; CHECK-INLINE-NEXT:    [[TMP47:%.*]] = ptrtoint ptr [[TMP46]] to i64
-; CHECK-INLINE-NEXT:    [[TMP48:%.*]] = add i64 [[TMP47]], 32
-; CHECK-INLINE-NEXT:    call void @__asan_alloca_poison(i64 [[TMP48]], i64 4)
-; CHECK-INLINE-NEXT:    [[TMP49:%.*]] = ptrtoint ptr [[TMP46]] to i64
-; CHECK-INLINE-NEXT:    store i64 [[TMP49]], ptr [[TMP0]], align 8
-; CHECK-INLINE-NEXT:    [[TMP50:%.*]] = inttoptr i64 [[TMP48]] to ptr
-; CHECK-INLINE-NEXT:    [[TMP51:%.*]] = alloca i8, i64 96, align 32
-; CHECK-INLINE-NEXT:    [[TMP52:%.*]] = ptrtoint ptr [[TMP51]] to i64
-; CHECK-INLINE-NEXT:    [[TMP53:%.*]] = add i64 [[TMP52]], 32
-; CHECK-INLINE-NEXT:    call void @__asan_alloca_poison(i64 [[TMP53]], i64 8)
-; CHECK-INLINE-NEXT:    [[TMP54:%.*]] = ptrtoint ptr [[TMP51]] to i64
-; CHECK-INLINE-NEXT:    store i64 [[TMP54]], ptr [[TMP0]], align 8
-; CHECK-INLINE-NEXT:    [[TMP55:%.*]] = inttoptr i64 [[TMP53]] to ptr
-; CHECK-INLINE-NEXT:    [[TMP56:%.*]] = lshr i64 [[TMP53]], 3
-; CHECK-INLINE-NEXT:    [[TMP57:%.*]] = add i64 [[TMP56]], [[TMP1]]
-; CHECK-INLINE-NEXT:    [[TMP58:%.*]] = inttoptr i64 [[TMP57]] to ptr
-; CHECK-INLINE-NEXT:    [[TMP59:%.*]] = load i8, ptr [[TMP58]], align 1
-; CHECK-INLINE-NEXT:    [[TMP60:%.*]] = icmp ne i8 [[TMP59]], 0
-; CHECK-INLINE-NEXT:    br i1 [[TMP60]], label [[TMP61:%.*]], label [[TMP62:%.*]]
-; CHECK-INLINE:       61:
-; CHECK-INLINE-NEXT:    call void @__asan_report_store8(i64 [[TMP53]]) #[[ATTR8]]
+; CHECK-INLINE:       50:
+; CHECK-INLINE-NEXT:    store volatile i8 0, ptr [[TMP22]], align 1
+; CHECK-INLINE-NEXT:    [[TMP51:%.*]] = add i64 [[TMP29]], 1066
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_f8(i64 [[TMP51]], i64 1)
+; CHECK-INLINE-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP22]])
+; CHECK-INLINE-NEXT:    [[TMP52:%.*]] = alloca i8, i64 96, align 32
+; CHECK-INLINE-NEXT:    [[TMP53:%.*]] = ptrtoint ptr [[TMP52]] to i64
+; CHECK-INLINE-NEXT:    [[TMP54:%.*]] = add i64 [[TMP53]], 32
+; CHECK-INLINE-NEXT:    call void @__asan_alloca_poison(i64 [[TMP54]], i64 4)
+; CHECK-INLINE-NEXT:    [[TMP55:%.*]] = ptrtoint ptr [[TMP52]] to i64
+; CHECK-INLINE-NEXT:    store i64 [[TMP55]], ptr [[TMP0]], align 8
+; CHECK-INLINE-NEXT:    [[TMP56:%.*]] = inttoptr i64 [[TMP54]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP57:%.*]] = alloca i8, i64 96, align 32
+; CHECK-INLINE-NEXT:    [[TMP58:%.*]] = ptrtoint ptr [[TMP57]] to i64
+; CHECK-INLINE-NEXT:    [[TMP59:%.*]] = add i64 [[TMP58]], 32
+; CHECK-INLINE-NEXT:    call void @__asan_alloca_poison(i64 [[TMP59]], i64 8)
+; CHECK-INLINE-NEXT:    [[TMP60:%.*]] = ptrtoint ptr [[TMP57]] to i64
+; CHECK-INLINE-NEXT:    store i64 [[TMP60]], ptr [[TMP0]], align 8
+; CHECK-INLINE-NEXT:    [[TMP61:%.*]] = inttoptr i64 [[TMP59]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP62:%.*]] = lshr i64 [[TMP59]], 3
+; CHECK-INLINE-NEXT:    [[TMP63:%.*]] = add i64 [[TMP62]], [[TMP1]]
+; CHECK-INLINE-NEXT:    [[TMP64:%.*]] = inttoptr i64 [[TMP63]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP65:%.*]] = load i8, ptr [[TMP64]], align 1
+; CHECK-INLINE-NEXT:    [[TMP66:%.*]] = icmp ne i8 [[TMP65]], 0
+; CHECK-INLINE-NEXT:    br i1 [[TMP66]], label [[TMP67:%.*]], label [[TMP68:%.*]]
+; CHECK-INLINE:       67:
+; CHECK-INLINE-NEXT:    call void @__asan_report_store8(i64 [[TMP59]]) #[[ATTR8]]
 ; CHECK-INLINE-NEXT:    unreachable
-; CHECK-INLINE:       62:
-; CHECK-INLINE-NEXT:    store volatile i64 0, ptr [[TMP55]], align 8
-; CHECK-INLINE-NEXT:    [[TMPCOPYI64:%.*]] = load i64, ptr [[TMP55]], align 8
-; CHECK-INLINE-NEXT:    [[TMP63:%.*]] = and i64 [[TMPCOPYI64]], 31
-; CHECK-INLINE-NEXT:    [[TMP64:%.*]] = sub i64 32, [[TMP63]]
-; CHECK-INLINE-NEXT:    [[TMP65:%.*]] = icmp ne i64 [[TMP64]], 32
-; CHECK-INLINE-NEXT:    [[TMP66:%.*]] = select i1 [[TMP65]], i64 [[TMP64]], i64 0
-; CHECK-INLINE-NEXT:    [[TMP67:%.*]] = add i64 64, [[TMP66]]
-; CHECK-INLINE-NEXT:    [[TMP68:%.*]] = add i64 [[TMPCOPYI64]], [[TMP67]]
-; CHECK-INLINE-NEXT:    [[TMP69:%.*]] = alloca i8, i64 [[TMP68]], align 32
-; CHECK-INLINE-NEXT:    [[TMP70:%.*]] = ptrtoint ptr [[TMP69]] to i64
-; CHECK-INLINE-NEXT:    [[TMP71:%.*]] = add i64 [[TMP70]], 32
-; CHECK-INLINE-NEXT:    call void @__asan_alloca_poison(i64 [[TMP71]], i64 [[TMPCOPYI64]])
-; CHECK-INLINE-NEXT:    [[TMP72:%.*]] = ptrtoint ptr [[TMP69]] to i64
-; CHECK-INLINE-NEXT:    store i64 [[TMP72]], ptr [[TMP0]], align 8
-; CHECK-INLINE-NEXT:    [[TMP73:%.*]] = inttoptr i64 [[TMP71]] to ptr
-; CHECK-INLINE-NEXT:    [[TMP74:%.*]] = lshr i64 [[TMP71]], 3
-; CHECK-INLINE-NEXT:    [[TMP75:%.*]] = add i64 [[TMP74]], [[TMP1]]
-; CHECK-INLINE-NEXT:    [[TMP76:%.*]] = inttoptr i64 [[TMP75]] to ptr
-; CHECK-INLINE-NEXT:    [[TMP77:%.*]] = load i8, ptr [[TMP76]], align 1
-; CHECK-INLINE-NEXT:    [[TMP78:%.*]] = icmp ne i8 [[TMP77]], 0
-; CHECK-INLINE-NEXT:    br i1 [[TMP78]], label [[TMP79:%.*]], label [[TMP84:%.*]], !prof [[PROF0]]
-; CHECK-INLINE:       79:
-; CHECK-INLINE-NEXT:    [[TMP80:%.*]] = and i64 [[TMP71]], 7
-; CHECK-INLINE-NEXT:    [[TMP81:%.*]] = trunc i64 [[TMP80]] to i8
-; CHECK-INLINE-NEXT:    [[TMP82:%.*]] = icmp sge i8 [[TMP81]], [[TMP77]]
-; CHECK-INLINE-NEXT:    br i1 [[TMP82]], label [[TMP83:%.*]], label [[TMP84]]
-; CHECK-INLINE:       83:
-; CHECK-INLINE-NEXT:    call void @__asan_report_store1(i64 [[TMP71]]) #[[ATTR8]]
+; CHECK-INLINE:       68:
+; CHECK-INLINE-NEXT:    store volatile i64 0, ptr [[TMP61]], align 8
+; CHECK-INLINE-NEXT:    [[TMPCOPYI64:%.*]] = load i64, ptr [[TMP61]], align 8
+; CHECK-INLINE-NEXT:    [[TMP69:%.*]] = and i64 [[TMPCOPYI64]], 31
+; CHECK-INLINE-NEXT:    [[TMP70:%.*]] = sub i64 32, [[TMP69]]
+; CHECK-INLINE-NEXT:    [[TMP71:%.*]] = icmp ne i64 [[TMP70]], 32
+; CHECK-INLINE-NEXT:    [[TMP72:%.*]] = select i1 [[TMP71]], i64 [[TMP70]], i64 0
+; CHECK-INLINE-NEXT:    [[TMP73:%.*]] = add i64 64, [[TMP72]]
+; CHECK-INLINE-NEXT:    [[TMP74:%.*]] = add i64 [[TMPCOPYI64]], [[TMP73]]
+; CHECK-INLINE-NEXT:    [[TMP75:%.*]] = alloca i8, i64 [[TMP74]], align 32
+; CHECK-INLINE-NEXT:    [[TMP76:%.*]] = ptrtoint ptr [[TMP75]] to i64
+; CHECK-INLINE-NEXT:    [[TMP77:%.*]] = add i64 [[TMP76]], 32
+; CHECK-INLINE-NEXT:    call void @__asan_alloca_poison(i64 [[TMP77]], i64 [[TMPCOPYI64]])
+; CHECK-INLINE-NEXT:    [[TMP78:%.*]] = ptrtoint ptr [[TMP75]] to i64
+; CHECK-INLINE-NEXT:    store i64 [[TMP78]], ptr [[TMP0]], align 8
+; CHECK-INLINE-NEXT:    [[TMP79:%.*]] = inttoptr i64 [[TMP77]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP80:%.*]] = lshr i64 [[TMP77]], 3
+; CHECK-INLINE-NEXT:    [[TMP81:%.*]] = add i64 [[TMP80]], [[TMP1]]
+; CHECK-INLINE-NEXT:    [[TMP82:%.*]] = inttoptr i64 [[TMP81]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP83:%.*]] = load i8, ptr [[TMP82]], align 1
+; CHECK-INLINE-NEXT:    [[TMP84:%.*]] = icmp ne i8 [[TMP83]], 0
+; CHECK-INLINE-NEXT:    br i1 [[TMP84]], label [[TMP85:%.*]], label [[TMP90:%.*]], !prof [[PROF0]]
+; CHECK-INLINE:       85:
+; CHECK-INLINE-NEXT:    [[TMP86:%.*]] = and i64 [[TMP77]], 7
+; CHECK-INLINE-NEXT:    [[TMP87:%.*]] = trunc i64 [[TMP86]] to i8
+; CHECK-INLINE-NEXT:    [[TMP88:%.*]] = icmp sge i8 [[TMP87]], [[TMP83]]
+; CHECK-INLINE-NEXT:    br i1 [[TMP88]], label [[TMP89:%.*]], label [[TMP90]]
+; CHECK-INLINE:       89:
+; CHECK-INLINE-NEXT:    call void @__asan_report_store1(i64 [[TMP77]]) #[[ATTR8]]
 ; CHECK-INLINE-NEXT:    unreachable
-; CHECK-INLINE:       84:
-; CHECK-INLINE-NEXT:    store volatile i8 0, ptr [[TMP73]], align 1
+; CHECK-INLINE:       90:
+; CHECK-INLINE-NEXT:    store volatile i8 0, ptr [[TMP79]], align 1
 ; CHECK-INLINE-NEXT:    invoke void @MayThrowFunc()
 ; CHECK-INLINE-NEXT:            to label [[INVOKE_CONT:%.*]] unwind label [[EHCLEANUP:%.*]]
 ; CHECK-INLINE:       invoke.cont:
 ; CHECK-INLINE-NEXT:    call void @DeInit(ptr [[TMP14]])
-; CHECK-INLINE-NEXT:    [[TMP85:%.*]] = ptrtoint ptr [[TMP0]] to i64
-; CHECK-INLINE-NEXT:    [[TMP86:%.*]] = load i64, ptr [[TMP0]], align 8
-; CHECK-INLINE-NEXT:    call void @__asan_allocas_unpoison(i64 [[TMP86]], i64 [[TMP85]])
-; CHECK-INLINE-NEXT:    store i64 1172321806, ptr [[TMP17]], align 8
-; CHECK-INLINE-NEXT:    [[TMP87:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-INLINE-NEXT:    br i1 [[TMP87]], label [[TMP88:%.*]], label [[TMP89:%.*]]
-; CHECK-INLINE:       88:
+; CHECK-INLINE-NEXT:    [[TMP91:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; CHECK-INLINE-NEXT:    [[TMP92:%.*]] = load i64, ptr [[TMP0]], align 8
+; CHECK-INLINE-NEXT:    call void @__asan_allocas_unpoison(i64 [[TMP92]], i64 [[TMP91]])
+; CHECK-INLINE-NEXT:    store i64 1172321806, ptr [[TMP23]], align 8
+; CHECK-INLINE-NEXT:    [[TMP93:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-INLINE-NEXT:    br i1 [[TMP93]], label [[TMP94:%.*]], label [[TMP95:%.*]]
+; CHECK-INLINE:       94:
 ; CHECK-INLINE-NEXT:    call void @__asan_stack_free_8(i64 [[TMP7]], i64 8544)
-; CHECK-INLINE-NEXT:    br label [[TMP91:%.*]]
-; CHECK-INLINE:       89:
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP23]], i64 4)
-; CHECK-INLINE-NEXT:    [[TMP90:%.*]] = add i64 [[TMP23]], 1028
-; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP90]], i64 40)
-; CHECK-INLINE-NEXT:    br label [[TMP91]]
-; CHECK-INLINE:       91:
+; CHECK-INLINE-NEXT:    br label [[TMP97:%.*]]
+; CHECK-INLINE:       95:
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP29]], i64 4)
+; CHECK-INLINE-NEXT:    [[TMP96:%.*]] = add i64 [[TMP29]], 1028
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP96]], i64 40)
+; CHECK-INLINE-NEXT:    br label [[TMP97]]
+; CHECK-INLINE:       97:
 ; CHECK-INLINE-NEXT:    ret void
 ; CHECK-INLINE:       ehcleanup:
-; CHECK-INLINE-NEXT:    [[TMP92:%.*]] = cleanuppad within none []
+; CHECK-INLINE-NEXT:    [[TMP98:%.*]] = cleanuppad within none []
+; CHECK-INLINE-NEXT:    call void @__asan_unpoison_stack_memory(i64 [[TMP54]], i64 4) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP56]])
+; CHECK-INLINE-NEXT:    [[TMP99:%.*]] = lshr i64 [[TMP54]], 3
+; CHECK-INLINE-NEXT:    [[TMP100:%.*]] = add i64 [[TMP99]], [[TMP1]]
+; CHECK-INLINE-NEXT:    [[TMP101:%.*]] = inttoptr i64 [[TMP100]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP102:%.*]] = load i8, ptr [[TMP101]], align 1
+; CHECK-INLINE-NEXT:    [[TMP103:%.*]] = icmp ne i8 [[TMP102]], 0
+; CHECK-INLINE-NEXT:    br i1 [[TMP103]], label [[TMP104:%.*]], label [[TMP109:%.*]], !prof [[PROF0]]
+; CHECK-INLINE:       104:
+; CHECK-INLINE-NEXT:    [[TMP105:%.*]] = and i64 [[TMP54]], 7
+; CHECK-INLINE-NEXT:    [[TMP106:%.*]] = trunc i64 [[TMP105]] to i8
+; CHECK-INLINE-NEXT:    [[TMP107:%.*]] = icmp sge i8 [[TMP106]], [[TMP102]]
+; CHECK-INLINE-NEXT:    br i1 [[TMP107]], label [[TMP108:%.*]], label [[TMP109]]
+; CHECK-INLINE:       108:
+; CHECK-INLINE-NEXT:    call void @__asan_report_store1(i64 [[TMP54]]) #[[ATTR8]] [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    unreachable
+; CHECK-INLINE:       109:
+; CHECK-INLINE-NEXT:    store volatile i8 0, ptr [[TMP56]], align 1
+; CHECK-INLINE-NEXT:    call void @__asan_poison_stack_memory(i64 [[TMP54]], i64 4) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP56]])
+; CHECK-INLINE-NEXT:    call void @DeInit(ptr [[TMP14]]) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    [[TMP110:%.*]] = call ptr @__asan_memset(ptr [[TMP16]], i32 0, i64 4) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    [[TMP111:%.*]] = call ptr @__asan_memcpy(ptr [[TMP18]], ptr [[TMP16]], i64 4) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    [[TMP112:%.*]] = call ptr @__asan_memmove(ptr [[TMP20]], ptr [[TMP16]], i64 4) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    call void @__sanitizer_ptr_cmp(i64 [[TMP15]], i64 [[TMP17]]) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    call void @__sanitizer_ptr_sub(i64 [[TMP15]], i64 [[TMP17]]) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    [[TMP113:%.*]] = ptrtoint ptr [[PTRPARAM]] to i64
+; CHECK-INLINE-NEXT:    [[TMP114:%.*]] = add i64 [[TMP113]], 7
+; CHECK-INLINE-NEXT:    [[TMP115:%.*]] = inttoptr i64 [[TMP114]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP116:%.*]] = ptrtoint ptr [[PTRPARAM]] to i64
+; CHECK-INLINE-NEXT:    [[TMP117:%.*]] = lshr i64 [[TMP116]], 3
+; CHECK-INLINE-NEXT:    [[TMP118:%.*]] = add i64 [[TMP117]], [[TMP1]]
+; CHECK-INLINE-NEXT:    [[TMP119:%.*]] = inttoptr i64 [[TMP118]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP120:%.*]] = load i8, ptr [[TMP119]], align 1
+; CHECK-INLINE-NEXT:    [[TMP121:%.*]] = icmp ne i8 [[TMP120]], 0
+; CHECK-INLINE-NEXT:    br i1 [[TMP121]], label [[TMP122:%.*]], label [[TMP127:%.*]], !prof [[PROF0]]
+; CHECK-INLINE:       122:
+; CHECK-INLINE-NEXT:    [[TMP123:%.*]] = and i64 [[TMP116]], 7
+; CHECK-INLINE-NEXT:    [[TMP124:%.*]] = trunc i64 [[TMP123]] to i8
+; CHECK-INLINE-NEXT:    [[TMP125:%.*]] = icmp sge i8 [[TMP124]], [[TMP120]]
+; CHECK-INLINE-NEXT:    br i1 [[TMP125]], label [[TMP126:%.*]], label [[TMP127]]
+; CHECK-INLINE:       126:
+; CHECK-INLINE-NEXT:    call void @__asan_report_store_n(i64 [[TMP116]], i64 8) #[[ATTR8]] [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    unreachable
+; CHECK-INLINE:       127:
+; CHECK-INLINE-NEXT:    [[TMP128:%.*]] = lshr i64 [[TMP114]], 3
+; CHECK-INLINE-NEXT:    [[TMP129:%.*]] = add i64 [[TMP128]], [[TMP1]]
+; CHECK-INLINE-NEXT:    [[TMP130:%.*]] = inttoptr i64 [[TMP129]] to ptr
+; CHECK-INLINE-NEXT:    [[TMP131:%.*]] = load i8, ptr [[TMP130]], align 1
+; CHECK-INLINE-NEXT:    [[TMP132:%.*]] = icmp ne i8 [[TMP131]], 0
+; CHECK-INLINE-NEXT:    br i1 [[TMP132]], label [[TMP133:%.*]], label [[EHEXIT:%.*]], !prof [[PROF0]]
+; CHECK-INLINE:       133:
+; CHECK-INLINE-NEXT:    [[TMP134:%.*]] = and i64 [[TMP114]], 7
+; CHECK-INLINE-NEXT:    [[TMP135:%.*]] = trunc i64 [[TMP134]] to i8
+; CHECK-INLINE-NEXT:    [[TMP136:%.*]] = icmp sge i8 [[TMP135]], [[TMP131]]
+; CHECK-INLINE-NEXT:    br i1 [[TMP136]], label [[TMP137:%.*]], label [[EHEXIT]]
+; CHECK-INLINE:       137:
+; CHECK-INLINE-NEXT:    call void @__asan_report_store_n(i64 [[TMP114]], i64 8) #[[ATTR8]] [ "funclet"(token [[TMP98]]) ]
 ; CHECK-INLINE-NEXT:    unreachable
+; CHECK-INLINE:       ehexit:
+; CHECK-INLINE-NEXT:    store i64 0, ptr [[PTRPARAM]], align 1
+; CHECK-INLINE-NEXT:    [[TMP138:%.*]] = call i64 @llvm.get.dynamic.area.offset.i64()
+; CHECK-INLINE-NEXT:    [[TMP139:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; CHECK-INLINE-NEXT:    [[TMP140:%.*]] = add i64 [[TMP139]], [[TMP138]]
+; CHECK-INLINE-NEXT:    [[TMP141:%.*]] = load i64, ptr [[TMP0]], align 8
+; CHECK-INLINE-NEXT:    call void @__asan_allocas_unpoison(i64 [[TMP141]], i64 [[TMP140]]) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    store i64 1172321806, ptr [[TMP23]], align 8
+; CHECK-INLINE-NEXT:    [[TMP142:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-INLINE-NEXT:    br i1 [[TMP142]], label [[TMP143:%.*]], label [[TMP144:%.*]]
+; CHECK-INLINE:       143:
+; CHECK-INLINE-NEXT:    call void @__asan_stack_free_8(i64 [[TMP7]], i64 8544) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    br label [[TMP146:%.*]]
+; CHECK-INLINE:       144:
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP29]], i64 4) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    [[TMP145:%.*]] = add i64 [[TMP29]], 1028
+; CHECK-INLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP145]], i64 40) [ "funclet"(token [[TMP98]]) ]
+; CHECK-INLINE-NEXT:    br label [[TMP146]]
+; CHECK-INLINE:       146:
+; CHECK-INLINE-NEXT:    cleanupret from [[TMP98]] unwind to caller
 ;
 ; CHECK-OUTLINE-LABEL: define void @FuncletPersonality(
 ; CHECK-OUTLINE-SAME: ptr [[PTRPARAM:%.*]]) #[[ATTR4:[0-9]+]] personality ptr @__CxxFrameHandler3 {
@@ -211,96 +292,140 @@ define void @FuncletPersonality(ptr %ptrParam) sanitize_address personality ptr
 ; CHECK-OUTLINE-NEXT:    store i64 [[TMP12]], ptr [[ASAN_LOCAL_STACK_BASE]], align 8
 ; CHECK-OUTLINE-NEXT:    [[TMP13:%.*]] = add i64 [[TMP12]], 32
 ; CHECK-OUTLINE-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-OUTLINE-NEXT:    [[TMP15:%.*]] = add i64 [[TMP12]], 8528
+; CHECK-OUTLINE-NEXT:    [[TMP15:%.*]] = add i64 [[TMP12]], 8480
 ; CHECK-OUTLINE-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-OUTLINE-NEXT:    [[TMP17:%.*]] = add i64 [[TMP12]], 8560
+; CHECK-OUTLINE-NEXT:    [[TMP17:%.*]] = add i64 [[TMP12]], 8496
 ; CHECK-OUTLINE-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-; CHECK-OUTLINE-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP12]] to ptr
-; CHECK-OUTLINE-NEXT:    store i64 1102416563, ptr [[TMP19]], align 8
-; CHECK-OUTLINE-NEXT:    [[TMP20:%.*]] = add i64 [[TMP12]], 8
-; CHECK-OUTLINE-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-; CHECK-OUTLINE-NEXT:    store i64 ptrtoint (ptr @___asan_gen_ to i64), ptr [[TMP21]], align 8
-; CHECK-OUTLINE-NEXT:    [[TMP22:%.*]] = add i64 [[TMP12]], 16
-; CHECK-OUTLINE-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-OUTLINE-NEXT:    store i64 ptrtoint (ptr @FuncletPersonality to i64), ptr [[TMP23]], align 8
-; CHECK-OUTLINE-NEXT:    [[TMP24:%.*]] = lshr i64 [[TMP12]], 3
-; CHECK-OUTLINE-NEXT:    [[TMP25:%.*]] = add i64 [[TMP24]], [[TMP1]]
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f1(i64 [[TMP25]], i64 4)
-; CHECK-OUTLINE-NEXT:    [[TMP26:%.*]] = add i64 [[TMP25]], 1028
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP26]], i64 32)
-; CHECK-OUTLINE-NEXT:    [[TMP27:%.*]] = add i64 [[TMP25]], 1060
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP27]], i64 1)
-; CHECK-OUTLINE-NEXT:    [[TMP28:%.*]] = add i64 [[TMP25]], 1061
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP28]], i64 1)
-; CHECK-OUTLINE-NEXT:    [[TMP29:%.*]] = add i64 [[TMP25]], 1062
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP29]], i64 1)
-; CHECK-OUTLINE-NEXT:    [[TMP30:%.*]] = add i64 [[TMP25]], 1063
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP30]], i64 1)
-; CHECK-OUTLINE-NEXT:    [[TMP31:%.*]] = add i64 [[TMP25]], 1064
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP31]], i64 1)
-; CHECK-OUTLINE-NEXT:    [[TMP32:%.*]] = add i64 [[TMP25]], 1065
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP32]], i64 1)
-; CHECK-OUTLINE-NEXT:    [[TMP33:%.*]] = add i64 [[TMP25]], 1066
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f8(i64 [[TMP33]], i64 1)
-; CHECK-OUTLINE-NEXT:    [[TMP34:%.*]] = add i64 [[TMP25]], 1067
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP34]], i64 1)
-; CHECK-OUTLINE-NEXT:    [[TMP35:%.*]] = add i64 [[TMP25]], 1068
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f8(i64 [[TMP35]], i64 1)
-; CHECK-OUTLINE-NEXT:    [[TMP36:%.*]] = add i64 [[TMP25]], 1069
+; CHECK-OUTLINE-NEXT:    [[TMP19:%.*]] = add i64 [[TMP12]], 8512
+; CHECK-OUTLINE-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr
+; CHECK-OUTLINE-NEXT:    [[TMP21:%.*]] = add i64 [[TMP12]], 8528
+; CHECK-OUTLINE-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr
+; CHECK-OUTLINE-NEXT:    [[TMP23:%.*]] = add i64 [[TMP12]], 8544
+; CHECK-OUTLINE-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+; CHECK-OUTLINE-NEXT:    [[TMP25:%.*]] = add i64 [[TMP12]], 8560
+; CHECK-OUTLINE-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr
+; CHECK-OUTLINE-NEXT:    [[TMP27:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-OUTLINE-NEXT:    store i64 1102416563, ptr [[TMP27]], align 8
+; CHECK-OUTLINE-NEXT:    [[TMP28:%.*]] = add i64 [[TMP12]], 8
+; CHECK-OUTLINE-NEXT:    [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to ptr
+; CHECK-OUTLINE-NEXT:    store i64 ptrtoint (ptr @___asan_gen_ to i64), ptr [[TMP29]], align 8
+; CHECK-OUTLINE-NEXT:    [[TMP30:%.*]] = add i64 [[TMP12]], 16
+; CHECK-OUTLINE-NEXT:    [[TMP31:%.*]] = inttoptr i64 [[TMP30]] to ptr
+; CHECK-OUTLINE-NEXT:    store i64 ptrtoint (ptr @FuncletPersonality to i64), ptr [[TMP31]], align 8
+; CHECK-OUTLINE-NEXT:    [[TMP32:%.*]] = lshr i64 [[TMP12]], 3
+; CHECK-OUTLINE-NEXT:    [[TMP33:%.*]] = add i64 [[TMP32]], [[TMP1]]
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f1(i64 [[TMP33]], i64 4)
+; CHECK-OUTLINE-NEXT:    [[TMP34:%.*]] = add i64 [[TMP33]], 1028
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP34]], i64 32)
+; CHECK-OUTLINE-NEXT:    [[TMP35:%.*]] = add i64 [[TMP33]], 1060
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP35]], i64 1)
+; CHECK-OUTLINE-NEXT:    [[TMP36:%.*]] = add i64 [[TMP33]], 1061
 ; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP36]], i64 1)
-; CHECK-OUTLINE-NEXT:    [[TMP37:%.*]] = add i64 [[TMP25]], 1071
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f3(i64 [[TMP37]], i64 5)
-; CHECK-OUTLINE-NEXT:    [[TMP38:%.*]] = add i64 [[TMP25]], 1066
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP38]], i64 1)
-; CHECK-OUTLINE-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP16]])
-; CHECK-OUTLINE-NEXT:    call void @__asan_store1(i64 [[TMP15]])
-; CHECK-OUTLINE-NEXT:    store volatile i8 0, ptr [[TMP16]], align 1
-; CHECK-OUTLINE-NEXT:    [[TMP39:%.*]] = add i64 [[TMP25]], 1066
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f8(i64 [[TMP39]], i64 1)
-; CHECK-OUTLINE-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP16]])
-; CHECK-OUTLINE-NEXT:    call void @__asan_store8(i64 [[TMP17]])
-; CHECK-OUTLINE-NEXT:    store volatile i64 0, ptr [[TMP18]], align 8
-; CHECK-OUTLINE-NEXT:    [[TMPCOPYI64:%.*]] = load i64, ptr [[TMP18]], align 8
-; CHECK-OUTLINE-NEXT:    [[TMP40:%.*]] = and i64 [[TMPCOPYI64]], 31
-; CHECK-OUTLINE-NEXT:    [[TMP41:%.*]] = sub i64 32, [[TMP40]]
-; CHECK-OUTLINE-NEXT:    [[TMP42:%.*]] = icmp ne i64 [[TMP41]], 32
-; CHECK-OUTLINE-NEXT:    [[TMP43:%.*]] = select i1 [[TMP42]], i64 [[TMP41]], i64 0
-; CHECK-OUTLINE-NEXT:    [[TMP44:%.*]] = add i64 64, [[TMP43]]
-; CHECK-OUTLINE-NEXT:    [[TMP45:%.*]] = add i64 [[TMPCOPYI64]], [[TMP44]]
-; CHECK-OUTLINE-NEXT:    [[TMP46:%.*]] = alloca i8, i64 [[TMP45]], align 32
-; CHECK-OUTLINE-NEXT:    [[TMP47:%.*]] = ptrtoint ptr [[TMP46]] to i64
-; CHECK-OUTLINE-NEXT:    [[TMP48:%.*]] = add i64 [[TMP47]], 32
-; CHECK-OUTLINE-NEXT:    call void @__asan_alloca_poison(i64 [[TMP48]], i64 [[TMPCOPYI64]])
-; CHECK-OUTLINE-NEXT:    [[TMP49:%.*]] = ptrtoint ptr [[TMP46]] to i64
-; CHECK-OUTLINE-NEXT:    store i64 [[TMP49]], ptr [[TMP0]], align 8
-; CHECK-OUTLINE-NEXT:    [[TMP50:%.*]] = inttoptr i64 [[TMP48]] to ptr
-; CHECK-OUTLINE-NEXT:    call void @__asan_store1(i64 [[TMP48]])
-; CHECK-OUTLINE-NEXT:    store volatile i8 0, ptr [[TMP50]], align 1
+; CHECK-OUTLINE-NEXT:    [[TMP37:%.*]] = add i64 [[TMP33]], 1062
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP37]], i64 1)
+; CHECK-OUTLINE-NEXT:    [[TMP38:%.*]] = add i64 [[TMP33]], 1063
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP38]], i64 1)
+; CHECK-OUTLINE-NEXT:    [[TMP39:%.*]] = add i64 [[TMP33]], 1064
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP39]], i64 1)
+; CHECK-OUTLINE-NEXT:    [[TMP40:%.*]] = add i64 [[TMP33]], 1065
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP40]], i64 1)
+; CHECK-OUTLINE-NEXT:    [[TMP41:%.*]] = add i64 [[TMP33]], 1066
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f8(i64 [[TMP41]], i64 1)
+; CHECK-OUTLINE-NEXT:    [[TMP42:%.*]] = add i64 [[TMP33]], 1067
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP42]], i64 1)
+; CHECK-OUTLINE-NEXT:    [[TMP43:%.*]] = add i64 [[TMP33]], 1068
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f8(i64 [[TMP43]], i64 1)
+; CHECK-OUTLINE-NEXT:    [[TMP44:%.*]] = add i64 [[TMP33]], 1069
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f2(i64 [[TMP44]], i64 1)
+; CHECK-OUTLINE-NEXT:    [[TMP45:%.*]] = add i64 [[TMP33]], 1071
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f3(i64 [[TMP45]], i64 5)
+; CHECK-OUTLINE-NEXT:    [[TMP46:%.*]] = add i64 [[TMP33]], 1066
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP46]], i64 1)
+; CHECK-OUTLINE-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP22]])
+; CHECK-OUTLINE-NEXT:    call void @__asan_store1(i64 [[TMP21]])
+; CHECK-OUTLINE-NEXT:    store volatile i8 0, ptr [[TMP22]], align 1
+; CHECK-OUTLINE-NEXT:    [[TMP47:%.*]] = add i64 [[TMP33]], 1066
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f8(i64 [[TMP47]], i64 1)
+; CHECK-OUTLINE-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP22]])
+; CHECK-OUTLINE-NEXT:    call void @__asan_store8(i64 [[TMP25]])
+; CHECK-OUTLINE-NEXT:    store volatile i64 0, ptr [[TMP26]], align 8
+; CHECK-OUTLINE-NEXT:    [[TMPCOPYI64:%.*]] = load i64, ptr [[TMP26]], align 8
+; CHECK-OUTLINE-NEXT:    [[TMP48:%.*]] = and i64 [[TMPCOPYI64]], 31
+; CHECK-OUTLINE-NEXT:    [[TMP49:%.*]] = sub i64 32, [[TMP48]]
+; CHECK-OUTLINE-NEXT:    [[TMP50:%.*]] = icmp ne i64 [[TMP49]], 32
+; CHECK-OUTLINE-NEXT:    [[TMP51:%.*]] = select i1 [[TMP50]], i64 [[TMP49]], i64 0
+; CHECK-OUTLINE-NEXT:    [[TMP52:%.*]] = add i64 64, [[TMP51]]
+; CHECK-OUTLINE-NEXT:    [[TMP53:%.*]] = add i64 [[TMPCOPYI64]], [[TMP52]]
+; CHECK-OUTLINE-NEXT:    [[TMP54:%.*]] = alloca i8, i64 [[TMP53]], align 32
+; CHECK-OUTLINE-NEXT:    [[TMP55:%.*]] = ptrtoint ptr [[TMP54]] to i64
+; CHECK-OUTLINE-NEXT:    [[TMP56:%.*]] = add i64 [[TMP55]], 32
+; CHECK-OUTLINE-NEXT:    call void @__asan_alloca_poison(i64 [[TMP56]], i64 [[TMPCOPYI64]])
+; CHECK-OUTLINE-NEXT:    [[TMP57:%.*]] = ptrtoint ptr [[TMP54]] to i64
+; CHECK-OUTLINE-NEXT:    store i64 [[TMP57]], ptr [[TMP0]], align 8
+; CHECK-OUTLINE-NEXT:    [[TMP58:%.*]] = inttoptr i64 [[TMP56]] to ptr
+; CHECK-OUTLINE-NEXT:    call void @__asan_store1(i64 [[TMP56]])
+; CHECK-OUTLINE-NEXT:    store volatile i8 0, ptr [[TMP58]], align 1
 ; CHECK-OUTLINE-NEXT:    invoke void @MayThrowFunc()
 ; CHECK-OUTLINE-NEXT:            to label [[INVOKE_CONT:%.*]] unwind label [[EHCLEANUP:%.*]]
 ; CHECK-OUTLINE:       invoke.cont:
 ; CHECK-OUTLINE-NEXT:    call void @DeInit(ptr [[TMP14]])
-; CHECK-OUTLINE-NEXT:    [[TMP51:%.*]] = ptrtoint ptr [[TMP0]] to i64
-; CHECK-OUTLINE-NEXT:    [[TMP52:%.*]] = load i64, ptr [[TMP0]], align 8
-; CHECK-OUTLINE-NEXT:    call void @__asan_allocas_unpoison(i64 [[TMP52]], i64 [[TMP51]])
-; CHECK-OUTLINE-NEXT:    store i64 1172321806, ptr [[TMP19]], align 8
-; CHECK-OUTLINE-NEXT:    [[TMP53:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-OUTLINE-NEXT:    br i1 [[TMP53]], label [[TMP54:%.*]], label [[TMP55:%.*]]
-; CHECK-OUTLINE:       54:
+; CHECK-OUTLINE-NEXT:    [[TMP59:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; CHECK-OUTLINE-NEXT:    [[TMP60:%.*]] = load i64, ptr [[TMP0]], align 8
+; CHECK-OUTLINE-NEXT:    call void @__asan_allocas_unpoison(i64 [[TMP60]], i64 [[TMP59]])
+; CHECK-OUTLINE-NEXT:    store i64 1172321806, ptr [[TMP27]], align 8
+; CHECK-OUTLINE-NEXT:    [[TMP61:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-OUTLINE-NEXT:    br i1 [[TMP61]], label [[TMP62:%.*]], label [[TMP63:%.*]]
+; CHECK-OUTLINE:       62:
 ; CHECK-OUTLINE-NEXT:    call void @__asan_stack_free_8(i64 [[TMP7]], i64 8608)
-; CHECK-OUTLINE-NEXT:    br label [[TMP58:%.*]]
-; CHECK-OUTLINE:       55:
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP25]], i64 4)
-; CHECK-OUTLINE-NEXT:    [[TMP56:%.*]] = add i64 [[TMP25]], 1028
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP56]], i64 42)
-; CHECK-OUTLINE-NEXT:    [[TMP57:%.*]] = add i64 [[TMP25]], 1071
-; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP57]], i64 5)
-; CHECK-OUTLINE-NEXT:    br label [[TMP58]]
-; CHECK-OUTLINE:       58:
+; CHECK-OUTLINE-NEXT:    br label [[TMP66:%.*]]
+; CHECK-OUTLINE:       63:
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP33]], i64 4)
+; CHECK-OUTLINE-NEXT:    [[TMP64:%.*]] = add i64 [[TMP33]], 1028
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP64]], i64 42)
+; CHECK-OUTLINE-NEXT:    [[TMP65:%.*]] = add i64 [[TMP33]], 1071
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP65]], i64 5)
+; CHECK-OUTLINE-NEXT:    br label [[TMP66]]
+; CHECK-OUTLINE:       66:
 ; CHECK-OUTLINE-NEXT:    ret void
 ; CHECK-OUTLINE:       ehcleanup:
-; CHECK-OUTLINE-NEXT:    [[TMP59:%.*]] = cleanuppad within none []
-; CHECK-OUTLINE-NEXT:    unreachable
+; CHECK-OUTLINE-NEXT:    [[TMP67:%.*]] = cleanuppad within none []
+; CHECK-OUTLINE-NEXT:    [[TMP68:%.*]] = add i64 [[TMP33]], 1068
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_04(i64 [[TMP68]], i64 1) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP24]])
+; CHECK-OUTLINE-NEXT:    call void @__asan_store1(i64 [[TMP23]]) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    store volatile i8 0, ptr [[TMP24]], align 1
+; CHECK-OUTLINE-NEXT:    [[TMP69:%.*]] = add i64 [[TMP33]], 1068
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_f8(i64 [[TMP69]], i64 1) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP24]])
+; CHECK-OUTLINE-NEXT:    call void @DeInit(ptr [[TMP14]]) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    [[TMP70:%.*]] = call ptr @__asan_memset(ptr [[TMP16]], i32 0, i64 4) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    [[TMP71:%.*]] = call ptr @__asan_memcpy(ptr [[TMP18]], ptr [[TMP16]], i64 4) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    [[TMP72:%.*]] = call ptr @__asan_memmove(ptr [[TMP20]], ptr [[TMP16]], i64 4) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    call void @__sanitizer_ptr_cmp(i64 [[TMP15]], i64 [[TMP17]]) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    call void @__sanitizer_ptr_sub(i64 [[TMP15]], i64 [[TMP17]]) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    [[TMP73:%.*]] = ptrtoint ptr [[PTRPARAM]] to i64
+; CHECK-OUTLINE-NEXT:    call void @__asan_storeN(i64 [[TMP73]], i64 8) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    store i64 0, ptr [[PTRPARAM]], align 1
+; CHECK-OUTLINE-NEXT:    [[TMP74:%.*]] = call i64 @llvm.get.dynamic.area.offset.i64()
+; CHECK-OUTLINE-NEXT:    [[TMP75:%.*]] = ptrtoint ptr [[TMP0]] to i64
+; CHECK-OUTLINE-NEXT:    [[TMP76:%.*]] = add i64 [[TMP75]], [[TMP74]]
+; CHECK-OUTLINE-NEXT:    [[TMP77:%.*]] = load i64, ptr [[TMP0]], align 8
+; CHECK-OUTLINE-NEXT:    call void @__asan_allocas_unpoison(i64 [[TMP77]], i64 [[TMP76]]) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    store i64 1172321806, ptr [[TMP27]], align 8
+; CHECK-OUTLINE-NEXT:    [[TMP78:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-OUTLINE-NEXT:    br i1 [[TMP78]], label [[TMP79:%.*]], label [[TMP80:%.*]]
+; CHECK-OUTLINE:       79:
+; CHECK-OUTLINE-NEXT:    call void @__asan_stack_free_8(i64 [[TMP7]], i64 8608) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    br label [[TMP83:%.*]]
+; CHECK-OUTLINE:       80:
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP33]], i64 4) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    [[TMP81:%.*]] = add i64 [[TMP33]], 1028
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP81]], i64 42) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    [[TMP82:%.*]] = add i64 [[TMP33]], 1071
+; CHECK-OUTLINE-NEXT:    call void @__asan_set_shadow_00(i64 [[TMP82]], i64 5) [ "funclet"(token [[TMP67]]) ]
+; CHECK-OUTLINE-NEXT:    br label [[TMP83]]
+; CHECK-OUTLINE:       83:
+; CHECK-OUTLINE-NEXT:    cleanupret from [[TMP67]] unwind to caller
 ;
 
 
diff --git a/llvm/test/Instrumentation/AddressSanitizer/localescape.ll b/llvm/test/Instrumentation/AddressSanitizer/localescape.ll
index 4bce2203b3ee35..9806dec0fd94a7 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/localescape.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/localescape.ll
@@ -14,10 +14,10 @@ declare ptr @llvm.eh.recoverfp(ptr, ptr)
 declare ptr @llvm.localrecover(ptr, ptr, i32)
 declare void @llvm.localescape(...) #1
 
-declare i32 @_except_handler3(...)
+declare i32 @__gcc_personality_v0(...)
 declare void @may_throw(ptr %r)
 
-define i32 @main() sanitize_address personality ptr @_except_handler3 {
+define i32 @main() sanitize_address personality ptr @__gcc_personality_v0 {
 entry:
   %r = alloca i32, align 4
   %__exception_code = alloca i32, align 4

>From e99ccaa6909d88f3603cb2f43cd873c5ddea6ae9 Mon Sep 17 00:00:00 2001
From: Sylvain Audi <sylvain.audi at ubisoft.com>
Date: Wed, 21 Feb 2024 16:09:27 -0500
Subject: [PATCH 2/3] Formatting

---
 .../Transforms/Instrumentation/AddressSanitizer.cpp    | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index f7aef729dd348d..3b9f80c78b0874 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1327,11 +1327,11 @@ void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
                                               RuntimeCallInserter &RTCI) {
   InstrumentationIRBuilder IRB(MI);
   if (isa<MemTransferInst>(MI)) {
-  RTCI.createRuntimeCall(
-      IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
-      {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
-       IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
-       IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
+    RTCI.createRuntimeCall(
+        IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
+        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
+         IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
+         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
   } else if (isa<MemSetInst>(MI)) {
     RTCI.createRuntimeCall(
         IRB, AsanMemset,

>From 79cd9abc0c1bdf3f15d4a53c7edc400c9f1b15d0 Mon Sep 17 00:00:00 2001
From: Sylvain Audi <sylvain.audi at ubisoft.com>
Date: Wed, 21 Feb 2024 16:39:50 -0500
Subject: [PATCH 3/3] Skip the error message in the case of a colorless BB.

---
 llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 3b9f80c78b0874..6d70c6bf7c2ec6 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -675,6 +675,8 @@ class RuntimeCallInserter {
       // funclet opbundles are only valid in monochromatic BBs.
       // Note that unreachable BBs are seen as colorless by colorEHFunclets()
       // and will be DCE'ed later.
+      if (Colors.empty())
+        continue;
       if (Colors.size() != 1) {
         OwnerFn->getContext().emitError(
             "Instruction's BasicBlock is not monochromatic");



More information about the llvm-commits mailing list