[compiler-rt] a47d435 - [dfsan] Propagate origins for callsites

Jianzhou Zhao via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 26 11:13:10 PST 2021


Author: Jianzhou Zhao
Date: 2021-02-26T19:12:03Z
New Revision: a47d435bc4307ca71612dd03f09f12f7ff6b461f

URL: https://github.com/llvm/llvm-project/commit/a47d435bc4307ca71612dd03f09f12f7ff6b461f
DIFF: https://github.com/llvm/llvm-project/commit/a47d435bc4307ca71612dd03f09f12f7ff6b461f.diff

LOG: [dfsan] Propagate origins for callsites

This is a part of https://reviews.llvm.org/D95835.

Each customized function has two wrappers. The first one, dfsw, is for
normal shadow propagation. The second one, dfso, is used when origin
tracking is on. It calls the first one and does additional origin
propagation. Which one to use can be decided at instrumentation time.
This is to ensure minimal additional overhead when origin tracking
is off.

Reviewed-by: morehouse

Differential Revision: https://reviews.llvm.org/D97483

Added: 
    llvm/test/Instrumentation/DataFlowSanitizer/origin_abilist.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll

Modified: 
    compiler-rt/lib/dfsan/dfsan.syms.extra
    llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp

Removed: 
    


################################################################################
diff  --git a/compiler-rt/lib/dfsan/dfsan.syms.extra b/compiler-rt/lib/dfsan/dfsan.syms.extra
index 0d507eef0814..e34766c3ba1c 100644
--- a/compiler-rt/lib/dfsan/dfsan.syms.extra
+++ b/compiler-rt/lib/dfsan/dfsan.syms.extra
@@ -1,3 +1,4 @@
 dfsan_*
 __dfsan_*
 __dfsw_*
+__dfso_*

diff  --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index b44094e925e7..2c1850b5acb5 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -503,6 +503,7 @@ struct DFSanFunction {
   DataFlowSanitizer::InstrumentedABI IA;
   bool IsNativeABI;
   AllocaInst *LabelReturnAlloca = nullptr;
+  AllocaInst *OriginReturnAlloca = nullptr;
   DenseMap<Value *, Value *> ValShadowMap;
   DenseMap<Value *, Value *> ValOriginMap;
   DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
@@ -662,6 +663,12 @@ class DFSanVisitor : public InstVisitor<DFSanVisitor> {
 
   // Combines origins for all of I's operands.
   void visitInstOperandOrigins(Instruction &I);
+
+  void addShadowArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
+                          IRBuilder<> &IRB);
+
+  void addOriginArguments(Function &F, CallBase &CB, std::vector<Value *> &Args,
+                          IRBuilder<> &IRB);
 };
 
 } // end anonymous namespace
@@ -695,6 +702,13 @@ FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
   Type *RetType = T->getReturnType();
   if (!RetType->isVoidTy())
     ArgTypes.push_back(PrimitiveShadowPtrTy);
+
+  if (shouldTrackOrigins()) {
+    ArgTypes.append(T->getNumParams(), OriginTy);
+    if (!RetType->isVoidTy())
+      ArgTypes.push_back(OriginPtrTy);
+  }
+
   return FunctionType::get(T->getReturnType(), ArgTypes, false);
 }
 
@@ -706,26 +720,37 @@ TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
   // parameters of the custom function, so that parameter attributes
   // at call sites can be updated.
   std::vector<unsigned> ArgumentIndexMapping;
-  for (unsigned i = 0, ie = T->getNumParams(); i != ie; ++i) {
-    Type* param_type = T->getParamType(i);
+  for (unsigned I = 0, E = T->getNumParams(); I != E; ++I) {
+    Type *Param_type = T->getParamType(I);
     FunctionType *FT;
-    if (isa<PointerType>(param_type) && (FT = dyn_cast<FunctionType>(
-            cast<PointerType>(param_type)->getElementType()))) {
+    if (isa<PointerType>(Param_type) &&
+        (FT = dyn_cast<FunctionType>(
+             cast<PointerType>(Param_type)->getElementType()))) {
       ArgumentIndexMapping.push_back(ArgTypes.size());
       ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
       ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
     } else {
       ArgumentIndexMapping.push_back(ArgTypes.size());
-      ArgTypes.push_back(param_type);
+      ArgTypes.push_back(Param_type);
     }
   }
-  for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
+  for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
     ArgTypes.push_back(PrimitiveShadowTy);
   if (T->isVarArg())
     ArgTypes.push_back(PrimitiveShadowPtrTy);
   Type *RetType = T->getReturnType();
   if (!RetType->isVoidTy())
     ArgTypes.push_back(PrimitiveShadowPtrTy);
+
+  if (shouldTrackOrigins()) {
+    for (unsigned I = 0, E = T->getNumParams(); I != E; ++I)
+      ArgTypes.push_back(OriginTy);
+    if (T->isVarArg())
+      ArgTypes.push_back(OriginPtrTy);
+    if (!RetType->isVoidTy())
+      ArgTypes.push_back(OriginPtrTy);
+  }
+
   return TransformedFunction(
       T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()),
       ArgumentIndexMapping);
@@ -746,8 +771,10 @@ bool DataFlowSanitizer::isZeroShadow(Value *V) {
 }
 
 bool DataFlowSanitizer::shouldTrackOrigins() {
-  return ClTrackOrigins && getInstrumentedABI() == DataFlowSanitizer::IA_TLS &&
-         ClFast16Labels;
+  static const bool kShouldTrackOrigins =
+      ClTrackOrigins && getInstrumentedABI() == DataFlowSanitizer::IA_TLS &&
+      ClFast16Labels;
+  return kShouldTrackOrigins;
 }
 
 bool DataFlowSanitizer::shouldTrackFieldsAndIndices() {
@@ -1059,7 +1086,8 @@ Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
       Args.push_back(&*AI);
     CallInst *CI = CallInst::Create(FT, &*F->arg_begin(), Args, "", BB);
     ReturnInst *RI;
-    if (FT->getReturnType()->isVoidTy())
+    Type *RetType = FT->getReturnType();
+    if (RetType->isVoidTy())
       RI = ReturnInst::Create(*Ctx, BB);
     else
       RI = ReturnInst::Create(*Ctx, CI, BB);
@@ -1067,17 +1095,34 @@ Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
     // F is called by a wrapped custom function with primitive shadows. So
     // its arguments and return value need conversion.
     DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
-    Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI;
+    Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI;
+    ++ValAI;
     for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N) {
       Value *Shadow =
           DFSF.expandFromPrimitiveShadow(ValAI->getType(), &*ShadowAI, CI);
       DFSF.ValShadowMap[&*ValAI] = Shadow;
     }
+    Function::arg_iterator RetShadowAI = ShadowAI;
+    const bool ShouldTrackOrigins = shouldTrackOrigins();
+    if (ShouldTrackOrigins) {
+      ValAI = F->arg_begin();
+      ++ValAI;
+      Function::arg_iterator OriginAI = ShadowAI;
+      if (!RetType->isVoidTy())
+        ++OriginAI;
+      for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++OriginAI, --N) {
+        DFSF.ValOriginMap[&*ValAI] = &*OriginAI;
+      }
+    }
     DFSanVisitor(DFSF).visitCallInst(*CI);
-    if (!FT->getReturnType()->isVoidTy()) {
+    if (!RetType->isVoidTy()) {
       Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(
           DFSF.getShadow(RI->getReturnValue()), RI);
-      new StoreInst(PrimitiveShadow, &*std::prev(F->arg_end()), RI);
+      new StoreInst(PrimitiveShadow, &*RetShadowAI, RI);
+      if (ShouldTrackOrigins) {
+        Value *Origin = DFSF.getOrigin(RI->getReturnValue());
+        new StoreInst(Origin, &*std::prev(F->arg_end()), RI);
+      }
     }
   }
 
@@ -1367,7 +1412,9 @@ bool DataFlowSanitizer::runImpl(Module &M) {
               : GlobalValue::LinkOnceODRLinkage;
 
       Function *NewF = buildWrapperFunction(
-          &F, std::string("dfsw$") + std::string(F.getName()),
+          &F,
+          (shouldTrackOrigins() ? std::string("dfso$") : std::string("dfsw$")) +
+              std::string(F.getName()),
           wrapperLinkage, NewFT);
       if (getInstrumentedABI() == IA_TLS)
         NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs);
@@ -2387,6 +2434,83 @@ void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
   }
 }
 
+void DFSanVisitor::addShadowArguments(Function &F, CallBase &CB,
+                                      std::vector<Value *> &Args,
+                                      IRBuilder<> &IRB) {
+  FunctionType *FT = F.getFunctionType();
+
+  auto *I = CB.arg_begin();
+
+  // Adds non-variable argument shadows.
+  for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
+    Args.push_back(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB));
+
+  // Adds variable argument shadows.
+  if (FT->isVarArg()) {
+    auto *LabelVATy = ArrayType::get(DFSF.DFS.PrimitiveShadowTy,
+                                     CB.arg_size() - FT->getNumParams());
+    auto *LabelVAAlloca =
+        new AllocaInst(LabelVATy, getDataLayout().getAllocaAddrSpace(),
+                       "labelva", &DFSF.F->getEntryBlock().front());
+
+    for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
+      auto *LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, N);
+      IRB.CreateStore(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB),
+                      LabelVAPtr);
+    }
+
+    Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
+  }
+
+  // Adds the return value shadow.
+  if (!FT->getReturnType()->isVoidTy()) {
+    if (!DFSF.LabelReturnAlloca) {
+      DFSF.LabelReturnAlloca = new AllocaInst(
+          DFSF.DFS.PrimitiveShadowTy, getDataLayout().getAllocaAddrSpace(),
+          "labelreturn", &DFSF.F->getEntryBlock().front());
+    }
+    Args.push_back(DFSF.LabelReturnAlloca);
+  }
+}
+
+void DFSanVisitor::addOriginArguments(Function &F, CallBase &CB,
+                                      std::vector<Value *> &Args,
+                                      IRBuilder<> &IRB) {
+  FunctionType *FT = F.getFunctionType();
+
+  auto *I = CB.arg_begin();
+
+  // Add non-variable argument origins.
+  for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
+    Args.push_back(DFSF.getOrigin(*I));
+
+  // Add variable argument origins.
+  if (FT->isVarArg()) {
+    auto *OriginVATy =
+        ArrayType::get(DFSF.DFS.OriginTy, CB.arg_size() - FT->getNumParams());
+    auto *OriginVAAlloca =
+        new AllocaInst(OriginVATy, getDataLayout().getAllocaAddrSpace(),
+                       "originva", &DFSF.F->getEntryBlock().front());
+
+    for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
+      auto *OriginVAPtr = IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, N);
+      IRB.CreateStore(DFSF.getOrigin(*I), OriginVAPtr);
+    }
+
+    Args.push_back(IRB.CreateStructGEP(OriginVATy, OriginVAAlloca, 0));
+  }
+
+  // Add the return value origin.
+  if (!FT->getReturnType()->isVoidTy()) {
+    if (!DFSF.OriginReturnAlloca) {
+      DFSF.OriginReturnAlloca = new AllocaInst(
+          DFSF.DFS.OriginTy, getDataLayout().getAllocaAddrSpace(),
+          "originreturn", &DFSF.F->getEntryBlock().front());
+    }
+    Args.push_back(DFSF.OriginReturnAlloca);
+  }
+}
+
 bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
   IRBuilder<> IRB(&CB);
   switch (DFSF.DFS.getWrapperKind(&F)) {
@@ -2395,10 +2519,12 @@ bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
     IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
                    IRB.CreateGlobalStringPtr(F.getName()));
     DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
+    DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
     return true;
   case DataFlowSanitizer::WK_Discard:
     CB.setCalledFunction(&F);
     DFSF.setShadow(&CB, DFSF.DFS.getZeroShadow(&CB));
+    DFSF.setOrigin(&CB, DFSF.DFS.ZeroOrigin);
     return true;
   case DataFlowSanitizer::WK_Functional:
     CB.setCalledFunction(&F);
@@ -2412,9 +2538,10 @@ bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
     if (!CI)
       return false;
 
+    const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
     FunctionType *FT = F.getFunctionType();
     TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
-    std::string CustomFName = "__dfsw_";
+    std::string CustomFName = ShouldTrackOrigins ? "__dfso_" : "__dfsw_";
     CustomFName += F.getName();
     FunctionCallee CustomF = DFSF.DFS.Mod->getOrInsertFunction(
         CustomFName, CustomFn.TransformedType);
@@ -2451,38 +2578,14 @@ bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
       }
     }
 
-    // Adds non-variable argument shadows.
-    I = CB.arg_begin();
+    // Adds shadow arguments.
     const unsigned ShadowArgStart = Args.size();
-    for (unsigned N = FT->getNumParams(); N != 0; ++I, --N)
-      Args.push_back(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB));
-
-    // Adds variable argument shadows.
-    if (FT->isVarArg()) {
-      auto *LabelVATy = ArrayType::get(DFSF.DFS.PrimitiveShadowTy,
-                                       CB.arg_size() - FT->getNumParams());
-      auto *LabelVAAlloca =
-          new AllocaInst(LabelVATy, getDataLayout().getAllocaAddrSpace(),
-                         "labelva", &DFSF.F->getEntryBlock().front());
-
-      for (unsigned N = 0; I != CB.arg_end(); ++I, ++N) {
-        auto *LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, N);
-        IRB.CreateStore(DFSF.collapseToPrimitiveShadow(DFSF.getShadow(*I), &CB),
-                        LabelVAPtr);
-      }
-
-      Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
-    }
+    addShadowArguments(F, CB, Args, IRB);
 
-    // Adds the return value shadow.
-    if (!FT->getReturnType()->isVoidTy()) {
-      if (!DFSF.LabelReturnAlloca) {
-        DFSF.LabelReturnAlloca = new AllocaInst(
-            DFSF.DFS.PrimitiveShadowTy, getDataLayout().getAllocaAddrSpace(),
-            "labelreturn", &DFSF.F->getEntryBlock().front());
-      }
-      Args.push_back(DFSF.LabelReturnAlloca);
-    }
+    // Adds origin arguments.
+    const unsigned OriginArgStart = Args.size();
+    if (ShouldTrackOrigins)
+      addOriginArguments(F, CB, Args, IRB);
 
     // Adds variable arguments.
     append_range(Args, drop_begin(CB.args(), FT->getNumParams()));
@@ -2500,14 +2603,25 @@ bool DFSanVisitor::visitWrappedCallBase(Function &F, CallBase &CB) {
       if (CustomCI->getArgOperand(ArgNo)->getType() ==
           DFSF.DFS.PrimitiveShadowTy)
         CustomCI->addParamAttr(ArgNo, Attribute::ZExt);
+      if (ShouldTrackOrigins) {
+        const unsigned OriginArgNo = OriginArgStart + N;
+        if (CustomCI->getArgOperand(OriginArgNo)->getType() ==
+            DFSF.DFS.OriginTy)
+          CustomCI->addParamAttr(OriginArgNo, Attribute::ZExt);
+      }
     }
 
-    // Loads the return value shadow.
+    // Loads the return value shadow and origin.
     if (!FT->getReturnType()->isVoidTy()) {
       LoadInst *LabelLoad =
           IRB.CreateLoad(DFSF.DFS.PrimitiveShadowTy, DFSF.LabelReturnAlloca);
       DFSF.setShadow(CustomCI, DFSF.expandFromPrimitiveShadow(
                                    FT->getReturnType(), LabelLoad, &CB));
+      if (ShouldTrackOrigins) {
+        LoadInst *OriginLoad =
+            IRB.CreateLoad(DFSF.DFS.OriginTy, DFSF.OriginReturnAlloca);
+        DFSF.setOrigin(CustomCI, OriginLoad);
+      }
     }
 
     CI->replaceAllUsesWith(CustomCI);
@@ -2537,12 +2651,22 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
 
   IRBuilder<> IRB(&CB);
 
+  const bool ShouldTrackOrigins = DFSF.DFS.shouldTrackOrigins();
   FunctionType *FT = CB.getFunctionType();
   if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
     // Stores argument shadows.
     unsigned ArgOffset = 0;
     const DataLayout &DL = getDataLayout();
     for (unsigned I = 0, N = FT->getNumParams(); I != N; ++I) {
+      if (ShouldTrackOrigins) {
+        // Ignore overflowed origins
+        Value *ArgShadow = DFSF.getShadow(CB.getArgOperand(I));
+        if (I < DFSF.DFS.kNumOfElementsInArgOrgTLS &&
+            !DFSF.DFS.isZeroShadow(ArgShadow))
+          IRB.CreateStore(DFSF.getOrigin(CB.getArgOperand(I)),
+                          DFSF.getArgOriginTLS(I, IRB));
+      }
+
       unsigned Size =
           DL.getTypeAllocSize(DFSF.DFS.getShadowTy(FT->getParamType(I)));
       // Stop storing if arguments' size overflows. Inside a function, arguments
@@ -2588,6 +2712,13 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
         DFSF.setShadow(&CB, LI);
         DFSF.NonZeroChecks.push_back(LI);
       }
+
+      if (ShouldTrackOrigins) {
+        LoadInst *LI = NextIRB.CreateLoad(
+            DFSF.DFS.OriginTy, DFSF.getRetvalOriginTLS(), "_dfsret_o");
+        DFSF.SkipInsts.insert(LI);
+        DFSF.setOrigin(&CB, LI);
+      }
     }
   }
 

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_abilist.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_abilist.ll
new file mode 100644
index 000000000000..dc6249fea5ec
--- /dev/null
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_abilist.ll
@@ -0,0 +1,315 @@
+; RUN: opt < %s -dfsan -dfsan-track-origins=1 -dfsan-fast-16-labels=true -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @discard(i32 %a, i32 %b) {
+  ret i32 0
+}
+
+define i32 @call_discard(i32 %a, i32 %b) {
+  ; CHECK: @"dfs$call_discard"
+  ; CHECK: %r = call i32 @discard(i32 %a, i32 %b)
+  ; CHECK: store i32 0, i32* @__dfsan_retval_origin_tls, align 4
+  ; CHECK: ret i32 %r
+
+  %r = call i32 @discard(i32 %a, i32 %b)
+  ret i32 %r
+}
+
+; CHECK: i32 @functional(i32 %a, i32 %b)
+define i32 @functional(i32 %a, i32 %b) {
+  %c = add i32 %a, %b
+  ret i32 %c
+}
+
+define i32 @call_functional(i32 %a, i32 %b) {
+  ; CHECK: @"dfs$call_functional"
+  ; CHECK: [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+  ; CHECK: [[RO:%.*]] = select i1 {{.*}}, i32 [[BO]], i32 [[AO]]
+  ; CHECK: store i32 [[RO]], i32* @__dfsan_retval_origin_tls, align 4
+
+  %r = call i32 @functional(i32 %a, i32 %b)
+  ret i32 %r
+}
+
+define i32 @uninstrumented(i32 %a, i32 %b) {
+  %c = add i32 %a, %b
+  ret i32 %c
+}
+
+define i32 @call_uninstrumented(i32 %a, i32 %b) {
+  ; CHECK: @"dfs$call_uninstrumented"
+  ; CHECK: %r = call i32 @uninstrumented(i32 %a, i32 %b)
+  ; CHECK: store i32 0, i32* @__dfsan_retval_origin_tls, align 4
+  ; CHECK: ret i32 %r
+
+  %r = call i32 @uninstrumented(i32 %a, i32 %b)
+  ret i32 %r
+}
+
+define i32 @g(i32 %a, i32 %b) {
+  %c = add i32 %a, %b
+  ret i32 %c
+}
+
+@discardg = alias i32 (i32, i32), i32 (i32, i32)* @g
+
+define i32 @call_discardg(i32 %a, i32 %b) {
+  ; CHECK: @"dfs$call_discardg"
+  ; CHECK: %r = call i32 @discardg(i32 %a, i32 %b)
+  ; CHECK: store i32 0, i32* @__dfsan_retval_origin_tls, align 4
+  ; CHECK: ret i32 %r
+
+  %r = call i32 @discardg(i32 %a, i32 %b)
+  ret i32 %r
+}
+
+define void @custom_without_ret(i32 %a, i32 %b) {
+  ret void
+}
+
+define i32 @custom_with_ret(i32 %a, i32 %b) {
+  %c = add i32 %a, %b
+  ret i32 %c
+}
+
+define void @custom_varg_without_ret(i32 %a, i32 %b, ...) {
+  ret void
+}
+
+define i32 @custom_varg_with_ret(i32 %a, i32 %b, ...) {
+  %c = add i32 %a, %b
+  ret i32 %c
+}
+
+define i32 @custom_cb_with_ret(i32 (i32, i32)* %cb, i32 %a, i32 %b) {
+  %r = call i32 %cb(i32 %a, i32 %b)
+  ret i32 %r
+}
+
+define i32 @cb_with_ret(i32 %a, i32 %b) {
+  %c = add i32 %a, %b
+  ret i32 %c
+}
+
+define void @custom_cb_without_ret(void (i32, i32)* %cb, i32 %a, i32 %b) {
+  call void %cb(i32 %a, i32 %b)
+  ret void
+}
+
+define void @cb_without_ret(i32 %a, i32 %b) {
+  ret void
+}
+
+define i32 (i32, i32)* @ret_custom() {
+  ; CHECK: @"dfs$ret_custom"
+  ; CHECK: store i32 0, i32* @__dfsan_retval_origin_tls, align 4
+  
+  ret i32 (i32, i32)* @custom_with_ret
+}
+
+define void @call_custom_without_ret(i32 %a, i32 %b) {
+  ; CHECK: @"dfs$call_custom_without_ret"
+  ; CHECK: [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+  ; CHECK: [[BS:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+  ; CHECK: [[AS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+  ; CHECK: call void @__dfso_custom_without_ret(i32 %a, i32 %b, i16 zeroext [[AS]], i16 zeroext [[BS]], i32 zeroext [[AO]], i32 zeroext [[BO]])
+  ; CHECK-NEXT: ret void
+
+  call void @custom_without_ret(i32 %a, i32 %b)
+  ret void
+}
+
+define i32 @call_custom_with_ret(i32 %a, i32 %b) {
+  ; CHECK: @"dfs$call_custom_with_ret"
+  ; CHECK: %originreturn = alloca i32, align 4
+  ; CHECK: [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+  ; CHECK: %labelreturn = alloca i16, align 2
+  ; CHECK: [[BS:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+  ; CHECK: [[AS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+  ; CHECK: {{.*}} = call i32 @__dfso_custom_with_ret(i32 %a, i32 %b, i16 zeroext [[AS]], i16 zeroext [[BS]], i16* %labelreturn, i32 zeroext [[AO]], i32 zeroext [[BO]], i32* %originreturn)
+  ; CHECK: [[RS:%.*]] = load i16, i16* %labelreturn, align 2
+  ; CHECK: [[RO:%.*]] = load i32, i32* %originreturn, align 4
+  ; CHECK: store i16 [[RS]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+  ; CHECK: store i32 [[RO]], i32* @__dfsan_retval_origin_tls, align 4
+
+  %r = call i32 @custom_with_ret(i32 %a, i32 %b)
+  ret i32 %r
+}
+
+define void @call_custom_varg_without_ret(i32 %a, i32 %b) {
+  ; CHECK: @"dfs$call_custom_varg_without_ret"
+  ; CHECK: %originva = alloca [1 x i32], align 4
+  ; CHECK: [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+  ; CHECK: %labelva = alloca [1 x i16], align 2
+  ; CHECK: [[BS:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+  ; CHECK: [[AS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i16], [1 x i16]* %labelva, i32 0, i32 0
+  ; CHECK: store i16 [[AS]], i16* [[VS0]], align 2
+  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i16], [1 x i16]* %labelva, i32 0, i32 0
+  ; CHECK: [[VO0:%.*]] = getelementptr inbounds [1 x i32], [1 x i32]* %originva, i32 0, i32 0
+  ; CHECK: store i32 [[AO]], i32* [[VO0]], align 4
+  ; CHECK: [[VO0:%.*]] = getelementptr inbounds [1 x i32], [1 x i32]* %originva, i32 0, i32 0
+  ; CHECK: call void (i32, i32, i16, i16, i16*, i32, i32, i32*, ...) @__dfso_custom_varg_without_ret(i32 %a, i32 %b, i16 zeroext [[AS]], i16 zeroext [[BS]], i16* [[VS0]], i32 zeroext [[AO]], i32 zeroext [[BO]], i32* [[VO0]], i32 %a)
+  ; CHECK-NEXT: ret void
+
+  call void (i32, i32, ...) @custom_varg_without_ret(i32 %a, i32 %b, i32 %a)
+  ret void
+}
+
+define i32 @call_custom_varg_with_ret(i32 %a, i32 %b) {
+  ; CHECK: @"dfs$call_custom_varg_with_ret"
+  ; CHECK: %originreturn = alloca i32, align 4
+  ; CHECK: %originva = alloca [1 x i32], align 4
+  ; CHECK: [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+  ; CHECK: %labelreturn = alloca i16, align 2
+  ; CHECK: %labelva = alloca [1 x i16], align 2
+  ; CHECK: [[BS:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+  ; CHECK: [[AS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i16], [1 x i16]* %labelva, i32 0, i32 0
+  ; CHECK: store i16 [[BS]], i16* [[VS0]], align 2
+  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i16], [1 x i16]* %labelva, i32 0, i32 0
+  ; CHECK: [[VO0:%.*]] = getelementptr inbounds [1 x i32], [1 x i32]* %originva, i32 0, i32 0
+  ; CHECK: store i32 [[BO]], i32* [[VO0]], align 4
+  ; CHECK: [[VO0:%.*]] = getelementptr inbounds [1 x i32], [1 x i32]* %originva, i32 0, i32 0
+  ; CHECK: {{.*}} = call i32 (i32, i32, i16, i16, i16*, i16*, i32, i32, i32*, i32*, ...) @__dfso_custom_varg_with_ret(i32 %a, i32 %b, i16 zeroext [[AS]], i16 zeroext [[BS]], i16* [[VS0]], i16* %labelreturn, i32 zeroext [[AO]], i32 zeroext [[BO]], i32* [[VO0]], i32* %originreturn, i32 %b)
+  ; CHECK: [[RS:%.*]] = load i16, i16* %labelreturn, align 2
+  ; CHECK: [[RO:%.*]] = load i32, i32* %originreturn, align 4
+  ; CHECK: store i16 [[RS]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+  ; CHECK: store i32 [[RO]], i32* @__dfsan_retval_origin_tls, align 4
+
+  %r = call i32 (i32, i32, ...) @custom_varg_with_ret(i32 %a, i32 %b, i32 %b)
+  ret i32 %r
+}
+
+define i32 @call_custom_cb_with_ret(i32 %a, i32 %b) {
+  ; CHECK: @"dfs$call_custom_cb_with_ret"
+  ; CHECK: %originreturn = alloca i32, align 4
+  ; CHECK: [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+  ; CHECK: %labelreturn = alloca i16, align 2
+  ; CHECK: [[BS:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+  ; CHECK: [[AS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+  ; CHECK: {{.*}} = call i32 @__dfso_custom_cb_with_ret(i32 (i32 (i32, i32)*, i32, i32, i16, i16, i16*, i32, i32, i32*)* @"dfst0$custom_cb_with_ret", i8* bitcast (i32 (i32, i32)* @"dfs$cb_with_ret" to i8*), i32 %a, i32 %b, i16 zeroext 0, i16 zeroext [[AS]], i16 zeroext [[BS]], i16* %labelreturn, i32 zeroext 0, i32 zeroext [[AO]], i32 zeroext [[BO]], i32* %originreturn)
+  ; CHECK: [[RS:%.*]] = load i16, i16* %labelreturn, align 2
+  ; CHECK: [[RO:%.*]] = load i32, i32* %originreturn, align 4
+  ; CHECK: store i16 [[RS]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+  ; CHECK: store i32 [[RO]], i32* @__dfsan_retval_origin_tls, align 4
+
+  %r = call i32 @custom_cb_with_ret(i32 (i32, i32)* @cb_with_ret, i32 %a, i32 %b)
+  ret i32 %r
+}
+
+define void @call_custom_cb_without_ret(i32 %a, i32 %b) {
+  ; CHECK: @"dfs$call_custom_cb_without_ret"
+  ; CHECK: [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+  ; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+  ; CHECK: [[BS:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+  ; CHECK: [[AS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+  ; CHECK: call void @__dfso_custom_cb_without_ret(void (void (i32, i32)*, i32, i32, i16, i16, i32, i32)* @"dfst0$custom_cb_without_ret", i8* bitcast (void (i32, i32)* @"dfs$cb_without_ret" to i8*), i32 %a, i32 %b, i16 zeroext 0, i16 zeroext [[AS]], i16 zeroext [[BS]], i32 zeroext 0, i32 zeroext [[AO]], i32 zeroext [[BO]])
+  ; CHECK-NEXT: ret void
+
+  call void @custom_cb_without_ret(void (i32, i32)* @cb_without_ret, i32 %a, i32 %b)
+  ret void
+}
+
+; CHECK: define i32 @discardg(i32 %0, i32 %1)
+; CHECK: [[R:%.*]] = call i32 @"dfs$g"
+; CHECK-NEXT: %_dfsret = load i16, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK-NEXT: %_dfsret_o = load i32, i32* @__dfsan_retval_origin_tls, align 4
+; CHECK-NEXT: ret i32 [[R]]
+
+; CHECK: define linkonce_odr void @"dfso$custom_without_ret"(i32 %0, i32 %1)
+; CHECK:  [[BO:%.*]]  = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+; CHECK-NEXT:  [[AO:%.*]]  = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+; CHECK-NEXT:  [[BS:%.*]]  = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+; CHECK-NEXT:  [[AS:%.*]]  = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+; CHECK-NEXT:  call void @__dfso_custom_without_ret(i32 %0, i32 %1, i16 zeroext [[AS]], i16 zeroext [[BS]], i32 zeroext [[AO]], i32 zeroext [[BO]])
+; CHECK-NEXT:  ret void
+
+; CHECK: define linkonce_odr i32 @"dfso$custom_with_ret"(i32 %0, i32 %1)
+; CHECK:  %originreturn = alloca i32, align 4
+; CHECK-NEXT:  [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+; CHECK-NEXT:  [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+; CHECK-NEXT:  %labelreturn = alloca i16, align 2
+; CHECK-NEXT:  [[BS:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+; CHECK-NEXT:  [[AS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+; CHECK-NEXT:  [[R:%.*]] = call i32 @__dfso_custom_with_ret(i32 %0, i32 %1, i16 zeroext [[AS]], i16 zeroext [[BS]], i16* %labelreturn, i32 zeroext [[AO]], i32 zeroext [[BO]], i32* %originreturn)
+; CHECK-NEXT:  [[RS:%.*]] = load i16, i16* %labelreturn, align 2
+; CHECK-NEXT:  [[RO:%.*]] = load i32, i32* %originreturn, align 4
+; CHECK-NEXT:  store i16 [[RS]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK-NEXT:  store i32 [[RO]], i32* @__dfsan_retval_origin_tls, align 4
+; CHECK-NEXT:  ret i32 [[R]]
+
+; CHECK: define linkonce_odr void @"dfso$custom_varg_without_ret"(i32 %0, i32 %1, ...)
+; CHECK:  call void @__dfsan_vararg_wrapper(i8* getelementptr inbounds ([24 x i8], [24 x i8]* @0, i32 0, i32 0))
+; CHECK-NEXT:  unreachable
+
+; CHECK: define linkonce_odr i32 @"dfso$custom_varg_with_ret"(i32 %0, i32 %1, ...)
+; CHECK:  call void @__dfsan_vararg_wrapper(i8* getelementptr inbounds ([21 x i8], [21 x i8]* @1, i32 0, i32 0))
+; CHECK-NEXT:  unreachable
+
+; CHECK: define linkonce_odr i32 @"dfso$custom_cb_with_ret"(i32 (i32, i32)* %0, i32 %1, i32 %2)
+; CHECK:  %originreturn = alloca i32, align 4
+; CHECK-NEXT:  [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
+; CHECK-NEXT:  [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+; CHECK-NEXT:  [[CO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+; CHECK-NEXT:  %labelreturn = alloca i16, align 2
+; CHECK-NEXT:  [[BS:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 4) to i16*), align 2
+; CHECK-NEXT:  [[AS:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+; CHECK-NEXT:  [[CS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+; CHECK-NEXT:  [[C:%.*]] = bitcast i32 (i32, i32)* %0 to i8*
+; CHECK-NEXT:  [[R:%.*]] = call i32 @__dfso_custom_cb_with_ret(i32 (i32 (i32, i32)*, i32, i32, i16, i16, i16*, i32, i32, i32*)* @"dfst0$custom_cb_with_ret", i8* [[C]], i32 %1, i32 %2, i16 zeroext [[CS]], i16 zeroext [[AS]], i16 zeroext [[BS]], i16* %labelreturn, i32 zeroext [[CO]], i32 zeroext [[AO]], i32 zeroext [[BO]], i32* %originreturn)
+; CHECK-NEXT:  [[RS:%.*]] = load i16, i16* %labelreturn, align 2
+; CHECK-NEXT:  [[RO:%.*]] = load i32, i32* %originreturn, align 4
+; CHECK-NEXT:  store i16 [[RS]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK-NEXT:  store i32 [[RO]], i32* @__dfsan_retval_origin_tls, align 4
+; CHECK-NEXT:  ret i32 [[R]]
+
+; CHECK: define linkonce_odr void @"dfso$custom_cb_without_ret"(void (i32, i32)* %0, i32 %1, i32 %2)
+; CHECK:   [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
+; CHECK-NEXT:  [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+; CHECK-NEXT:  [[CO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+; CHECK-NEXT:  [[BS:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 4) to i16*), align 2
+; CHECK-NEXT:  [[AS:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+; CHECK-NEXT:  [[CS:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+; CHECK-NEXT:  [[C:%.*]] = bitcast void (i32, i32)* %0 to i8*
+; CHECK-NEXT:  call void @__dfso_custom_cb_without_ret(void (void (i32, i32)*, i32, i32, i16, i16, i32, i32)* @"dfst0$custom_cb_without_ret", i8* [[C]], i32 %1, i32 %2, i16 zeroext [[CS]], i16 zeroext [[AS]], i16 zeroext [[BS]], i32 zeroext [[CO]], i32 zeroext [[AO]], i32 zeroext [[BO]])
+; CHECK-NEXT:  ret void
+
+; CHECK: declare void @__dfso_custom_without_ret(i32, i32, i16, i16, i32, i32)
+
+; CHECK: declare i32 @__dfso_custom_with_ret(i32, i32, i16, i16, i16*, i32, i32, i32*)
+
+; CHECK: declare i32 @__dfso_custom_cb_with_ret(i32 (i32 (i32, i32)*, i32, i32, i16, i16, i16*, i32, i32, i32*)*, i8*, i32, i32, i16, i16, i16, i16*, i32, i32, i32, i32*)
+
+; CHECK: define linkonce_odr i32 @"dfst0$custom_cb_with_ret"(i32 (i32, i32)* %0, i32 %1, i32 %2, i16 %3, i16 %4, i16* %5, i32 %6, i32 %7, i32* %8)
+; CHECK:   store i32 %6, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+; CHECK-NEXT:  store i16 %3, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+; CHECK-NEXT:  store i32 %7, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+; CHECK-NEXT:  store i16 %4, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+; CHECK-NEXT:  %9 = call i32 %0(i32 %1, i32 %2)
+; CHECK-NEXT:  %_dfsret = load i16, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
+; CHECK-NEXT:  %_dfsret_o = load i32, i32* @__dfsan_retval_origin_tls, align 4
+; CHECK-NEXT:  store i16 %_dfsret, i16* %5, align 2
+; CHECK-NEXT:  store i32 %_dfsret_o, i32* %8, align 4
+; CHECK-NEXT:  ret i32 %9
+
+; CHECK: declare void @__dfso_custom_cb_without_ret(void (void (i32, i32)*, i32, i32, i16, i16, i32, i32)*, i8*, i32, i32, i16, i16, i16, i32, i32, i32)
+
+; CHECK: define linkonce_odr void @"dfst0$custom_cb_without_ret"(void (i32, i32)* %0, i32 %1, i32 %2, i16 %3, i16 %4, i32 %5, i32 %6)
+; CHECK:  store i32 %5, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
+; CHECK-NEXT:  store i16 %3, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+; CHECK-NEXT:  store i32 %6, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
+; CHECK-NEXT:  store i16 %4, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 2) to i16*), align 2
+; CHECK-NEXT:  call void %0(i32 %1, i32 %2)
+; CHECK-NEXT:  ret void
+
+; CHECK: declare void @__dfso_custom_varg_without_ret(i32, i32, i16, i16, i16*, i32, i32, i32*, ...)
+
+; CHECK: declare i32 @__dfso_custom_varg_with_ret(i32, i32, i16, i16, i16*, i16*, i32, i32, i32*, i32*, ...)
\ No newline at end of file

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll
new file mode 100644
index 000000000000..61a12e653397
--- /dev/null
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll
@@ -0,0 +1,80 @@
+; RUN: opt < %s -dfsan -dfsan-track-origins=1 -dfsan-fast-16-labels=true -S | FileCheck %s --check-prefix=CHECK
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i1 @arg_overflow(
+i1   %a0, i1   %a1, i1   %a2, i1   %a3, i1   %a4, i1   %a5, i1   %a6, i1   %a7, i1   %a8, i1   %a9,
+i1  %a10, i1  %a11, i1  %a12, i1  %a13, i1  %a14, i1  %a15, i1  %a16, i1  %a17, i1  %a18, i1  %a19,
+i1  %a20, i1  %a21, i1  %a22, i1  %a23, i1  %a24, i1  %a25, i1  %a26, i1  %a27, i1  %a28, i1  %a29,
+i1  %a30, i1  %a31, i1  %a32, i1  %a33, i1  %a34, i1  %a35, i1  %a36, i1  %a37, i1  %a38, i1  %a39,
+i1  %a40, i1  %a41, i1  %a42, i1  %a43, i1  %a44, i1  %a45, i1  %a46, i1  %a47, i1  %a48, i1  %a49,
+i1  %a50, i1  %a51, i1  %a52, i1  %a53, i1  %a54, i1  %a55, i1  %a56, i1  %a57, i1  %a58, i1  %a59,
+i1  %a60, i1  %a61, i1  %a62, i1  %a63, i1  %a64, i1  %a65, i1  %a66, i1  %a67, i1  %a68, i1  %a69,
+i1  %a70, i1  %a71, i1  %a72, i1  %a73, i1  %a74, i1  %a75, i1  %a76, i1  %a77, i1  %a78, i1  %a79,
+i1  %a80, i1  %a81, i1  %a82, i1  %a83, i1  %a84, i1  %a85, i1  %a86, i1  %a87, i1  %a88, i1  %a89,
+i1  %a90, i1  %a91, i1  %a92, i1  %a93, i1  %a94, i1  %a95, i1  %a96, i1  %a97, i1  %a98, i1  %a99,
+i1 %a100, i1 %a101, i1 %a102, i1 %a103, i1 %a104, i1 %a105, i1 %a106, i1 %a107, i1 %a108, i1 %a109,
+i1 %a110, i1 %a111, i1 %a112, i1 %a113, i1 %a114, i1 %a115, i1 %a116, i1 %a117, i1 %a118, i1 %a119,
+i1 %a120, i1 %a121, i1 %a122, i1 %a123, i1 %a124, i1 %a125, i1 %a126, i1 %a127, i1 %a128, i1 %a129,
+i1 %a130, i1 %a131, i1 %a132, i1 %a133, i1 %a134, i1 %a135, i1 %a136, i1 %a137, i1 %a138, i1 %a139,
+i1 %a140, i1 %a141, i1 %a142, i1 %a143, i1 %a144, i1 %a145, i1 %a146, i1 %a147, i1 %a148, i1 %a149,
+i1 %a150, i1 %a151, i1 %a152, i1 %a153, i1 %a154, i1 %a155, i1 %a156, i1 %a157, i1 %a158, i1 %a159,
+i1 %a160, i1 %a161, i1 %a162, i1 %a163, i1 %a164, i1 %a165, i1 %a166, i1 %a167, i1 %a168, i1 %a169,
+i1 %a170, i1 %a171, i1 %a172, i1 %a173, i1 %a174, i1 %a175, i1 %a176, i1 %a177, i1 %a178, i1 %a179,
+i1 %a180, i1 %a181, i1 %a182, i1 %a183, i1 %a184, i1 %a185, i1 %a186, i1 %a187, i1 %a188, i1 %a189,
+i1 %a190, i1 %a191, i1 %a192, i1 %a193, i1 %a194, i1 %a195, i1 %a196, i1 %a197, i1 %a198, i1 %a199,
+i1 %a200
+) {
+  ; CHECK: @"dfs$arg_overflow"
+  ; CHECK: [[A199:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
+  ; CHECK: store i32 [[A199]], i32* @__dfsan_retval_origin_tls, align 4
+
+  %r = add i1 %a199, %a200
+  ret i1 %r
+}
+
+define i1 @param_overflow(i1 %a) {
+  ; CHECK: @"dfs$param_overflow"
+  ; CHECK: store i32 %1, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
+  ; CHECK-NEXT: store i16 %2, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 398) to i16*), align 2
+  ; CHECK-NEXT: store i16 %2, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 400) to i16*), align 2
+  ; CHECK-NEXT: %r = call i1 @"dfs$arg_overflow"
+  ; CHECK: %_dfsret_o = load i32, i32* @__dfsan_retval_origin_tls, align 4
+  ; CHECK: store i32 %_dfsret_o, i32* @__dfsan_retval_origin_tls, align 4
+
+  %r = call i1 @arg_overflow(
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
+i1 %a
+)
+  ret i1 %r
+}
+
+declare void @foo(i1 %a)
+
+define void @param_with_zero_shadow() {
+  ; CHECK: @"dfs$param_with_zero_shadow"
+  ; CHECK-NEXT: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align 2
+  ; CHECK-NEXT: call void @"dfs$foo"(i1 true)
+
+  call void @foo(i1 1)
+  ret void
+}


        


More information about the llvm-commits mailing list