[llvm] a3e56a8 - [KMSAN] Enable on SystemZ
Ilya Leoshkevich via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 27 05:21:55 PDT 2023
Author: Ilya Leoshkevich
Date: 2023-04-27T13:44:54+02:00
New Revision: a3e56a8792ffaf3a3d3538736e1042b8db45ab89
URL: https://github.com/llvm/llvm-project/commit/a3e56a8792ffaf3a3d3538736e1042b8db45ab89
DIFF: https://github.com/llvm/llvm-project/commit/a3e56a8792ffaf3a3d3538736e1042b8db45ab89.diff
LOG: [KMSAN] Enable on SystemZ
Enable -fsanitize=kernel-memory support in Clang.
The x86_64 ABI requires that shadow_origin_ptr_t be returned in a
register pair, and the s390x ABI requires that it be returned via
memory pointed to by a hidden parameter. Normally Clang takes care of
the ABI, but the sanitizers run long after it, so unfortunately they
have to duplicate the ABI logic.
Therefore add a special case for SystemZ and manually emit the
s390x-ABI-compliant calling sequences. Since only two architectures are
affected, do not create a VarArgHelper-like abstraction layer.
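Illustratively, the two calling sequences differ roughly as in the
following LLVM IR sketch (value names are invented; the s390x shape
mirrors the new tests below, the x86_64 shape is the pre-existing one):

  ; x86_64: the { shadow, origin } pair is returned directly
  ; (in RDX:RAX at the machine level).
  %pair = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr %p)
  %shadow = extractvalue { ptr, ptr } %pair, 0
  %origin = extractvalue { ptr, ptr } %pair, 1

  ; s390x: the pair is written through a hidden first parameter and
  ; reloaded from the stack slot afterwards.
  %pair_addr = alloca { ptr, ptr }
  call void @__msan_metadata_ptr_for_store_1(ptr %pair_addr, ptr %p)
  %pair_z = load { ptr, ptr }, ptr %pair_addr
  %shadow_z = extractvalue { ptr, ptr } %pair_z, 0
  %origin_z = extractvalue { ptr, ptr } %pair_z, 1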
The kernel functions are compiled with the "packed-stack" and
"use-soft-float" attributes. For the "packed-stack" functions, it's not
correct for copyRegSaveArea() to copy 160 bytes of shadow and origins,
since the save area is dynamically sized. Things are greatly simplified
by the fact that the vararg "use-soft-float" functions use exactly
56 bytes to save the argument registers where va_arg() can find them.
Make copyRegSaveArea() copy only 56 bytes in the "use-soft-float" case.
The "packed-stack" && !"use-soft-float" case has no practical uses at
the moment, so leave it for the future.
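For reference, the 56-byte figure corresponds to the end of the GPR
slots in the register save area as encoded by the existing
VarArgSystemZHelper constants: assuming the usual s390x layout, the
argument registers r2-r6 live at offsets 16 through 55, so

  SystemZGpEndOffset = 16 + 5 GPRs * 8 bytes = 56

while the FPR slots, which soft-float code never fills, only start at
offset 128.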
Add tests.
Reviewed By: eugenis
Differential Revision: https://reviews.llvm.org/D148596
Added:
llvm/test/Instrumentation/MemorySanitizer/SystemZ/basic-kernel.ll
Modified:
clang/lib/Driver/ToolChains/Linux.cpp
llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
Removed:
################################################################################
diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
index 1fbd26aed596d..5b1922970ce2b 100644
--- a/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
@@ -795,7 +795,7 @@ SanitizerMask Linux::getSupportedSanitizers() const {
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64 || IsSystemZ ||
IsLoongArch64)
Res |= SanitizerKind::Thread;
- if (IsX86_64)
+ if (IsX86_64 || IsSystemZ)
Res |= SanitizerKind::KernelMemory;
if (IsX86 || IsX86_64)
Res |= SanitizerKind::Function;
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 93667c9482921..e2269c2a8638d 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -122,6 +122,10 @@
/// Arbitrary sized accesses are handled with:
/// __msan_metadata_ptr_for_load_n(ptr, size)
/// __msan_metadata_ptr_for_store_n(ptr, size);
+/// Note that the sanitizer code has to deal with how shadow/origin pairs
+/// returned by these functions are represented in different ABIs. In
+/// the X86_64 ABI they are returned in RDX:RAX, and in the SystemZ ABI they
+/// are written to memory pointed to by a hidden parameter.
/// - TLS variables are stored in a single per-task struct. A call to a
/// function __msan_get_context_state() returning a pointer to that struct
/// is inserted into every instrumented function before the entry block;
@@ -135,7 +139,7 @@
/// Also, KMSAN currently ignores uninitialized memory passed into inline asm
/// calls, making sure we're on the safe side wrt. possible false positives.
///
-/// KernelMemorySanitizer only supports X86_64 at the moment.
+/// KernelMemorySanitizer only supports X86_64 and SystemZ at the moment.
///
//
// FIXME: This sanitizer does not yet handle scalable vectors
@@ -543,6 +547,10 @@ class MemorySanitizer {
void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);
+ template <typename... ArgsTy>
+ FunctionCallee getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
+ ArgsTy... Args);
+
/// True if we're compiling the Linux kernel.
bool CompileKernel;
/// Track origins (allocation points) of uninitialized values.
@@ -550,6 +558,7 @@ class MemorySanitizer {
bool Recover;
bool EagerChecks;
+ Triple TargetTriple;
LLVMContext *C;
Type *IntptrTy;
Type *OriginTy;
@@ -620,13 +629,18 @@ class MemorySanitizer {
/// Functions for poisoning/unpoisoning local variables
FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
- /// Each of the MsanMetadataPtrXxx functions returns a pair of shadow/origin
- /// pointers.
+ /// Pair of shadow/origin pointers.
+ Type *MsanMetadata;
+
+ /// Each of the MsanMetadataPtrXxx functions returns a MsanMetadata.
FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
FunctionCallee MsanMetadataPtrForLoad_1_8[4];
FunctionCallee MsanMetadataPtrForStore_1_8[4];
FunctionCallee MsanInstrumentAsmStoreFn;
+ /// Storage for return values of the MsanMetadataPtrXxx functions.
+ Value *MsanMetadataAlloca;
+
/// Helper to choose between different MsanMetadataPtrXxx().
FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);
@@ -729,6 +743,21 @@ static GlobalVariable *createPrivateConstGlobalForString(Module &M,
GlobalValue::PrivateLinkage, StrConst, "");
}
+template <typename... ArgsTy>
+FunctionCallee
+MemorySanitizer::getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
+ ArgsTy... Args) {
+ if (TargetTriple.getArch() == Triple::systemz) {
+ // SystemZ ABI: shadow/origin pair is returned via a hidden parameter.
+ return M.getOrInsertFunction(Name, Type::getVoidTy(*C),
+ PointerType::get(MsanMetadata, 0),
+ std::forward<ArgsTy>(Args)...);
+ }
+
+ return M.getOrInsertFunction(Name, MsanMetadata,
+ std::forward<ArgsTy>(Args)...);
+}
+
/// Create KMSAN API callbacks.
void MemorySanitizer::createKernelApi(Module &M, const TargetLibraryInfo &TLI) {
IRBuilder<> IRB(*C);
@@ -758,25 +787,25 @@ void MemorySanitizer::createKernelApi(Module &M, const TargetLibraryInfo &TLI) {
MsanGetContextStateFn = M.getOrInsertFunction(
"__msan_get_context_state", PointerType::get(MsanContextStateTy, 0));
- Type *RetTy = StructType::get(PointerType::get(IRB.getInt8Ty(), 0),
- PointerType::get(IRB.getInt32Ty(), 0));
+ MsanMetadata = StructType::get(PointerType::get(IRB.getInt8Ty(), 0),
+ PointerType::get(IRB.getInt32Ty(), 0));
for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
std::string name_load =
"__msan_metadata_ptr_for_load_" + std::to_string(size);
std::string name_store =
"__msan_metadata_ptr_for_store_" + std::to_string(size);
- MsanMetadataPtrForLoad_1_8[ind] = M.getOrInsertFunction(
- name_load, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
- MsanMetadataPtrForStore_1_8[ind] = M.getOrInsertFunction(
- name_store, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
+ MsanMetadataPtrForLoad_1_8[ind] = getOrInsertMsanMetadataFunction(
+ M, name_load, PointerType::get(IRB.getInt8Ty(), 0));
+ MsanMetadataPtrForStore_1_8[ind] = getOrInsertMsanMetadataFunction(
+ M, name_store, PointerType::get(IRB.getInt8Ty(), 0));
}
- MsanMetadataPtrForLoadN = M.getOrInsertFunction(
- "__msan_metadata_ptr_for_load_n", RetTy,
- PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
- MsanMetadataPtrForStoreN = M.getOrInsertFunction(
- "__msan_metadata_ptr_for_store_n", RetTy,
+ MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
+ M, "__msan_metadata_ptr_for_load_n", PointerType::get(IRB.getInt8Ty(), 0),
+ IRB.getInt64Ty());
+ MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
+ M, "__msan_metadata_ptr_for_store_n",
PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
// Functions for poisoning and unpoisoning memory.
@@ -927,6 +956,8 @@ FunctionCallee MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore,
void MemorySanitizer::initializeModule(Module &M) {
auto &DL = M.getDataLayout();
+ TargetTriple = Triple(M.getTargetTriple());
+
bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
// Check the overrides first
@@ -937,7 +968,6 @@ void MemorySanitizer::initializeModule(Module &M) {
CustomMapParams.OriginBase = ClOriginBase;
MapParams = &CustomMapParams;
} else {
- Triple TargetTriple(M.getTargetTriple());
switch (TargetTriple.getOS()) {
case Triple::FreeBSD:
switch (TargetTriple.getArch()) {
@@ -1464,6 +1494,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
MS.RetvalOriginTLS =
IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
{Zero, IRB.getInt32(6)}, "retval_origin");
+ if (MS.TargetTriple.getArch() == Triple::systemz)
+ MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
}
/// Add MemorySanitizer instrumentation to a function.
@@ -1696,6 +1728,18 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return std::make_pair(ShadowPtr, OriginPtr);
}
+ template <typename... ArgsTy>
+ Value *createMetadataCall(IRBuilder<> &IRB, FunctionCallee Callee,
+ ArgsTy... Args) {
+ if (MS.TargetTriple.getArch() == Triple::systemz) {
+ IRB.CreateCall(Callee,
+ {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
+ return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
+ }
+
+ return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
+ }
+
std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
IRBuilder<> &IRB,
Type *ShadowTy,
@@ -1708,12 +1752,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *AddrCast =
IRB.CreatePointerCast(Addr, PointerType::get(IRB.getInt8Ty(), 0));
if (Getter) {
- ShadowOriginPtrs = IRB.CreateCall(Getter, AddrCast);
+ ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
} else {
Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
- ShadowOriginPtrs = IRB.CreateCall(isStore ? MS.MsanMetadataPtrForStoreN
- : MS.MsanMetadataPtrForLoadN,
- {AddrCast, SizeVal});
+ ShadowOriginPtrs = createMetadataCall(
+ IRB,
+ isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
+ AddrCast, SizeVal);
}
Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
ShadowPtr = IRB.CreatePointerCast(ShadowPtr, PointerType::get(ShadowTy, 0));
@@ -5684,11 +5729,15 @@ struct VarArgSystemZHelper : public VarArgHelper {
MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
/*isStore*/ true);
// TODO(iii): copy only fragments filled by visitCallBase()
+ // TODO(iii): support packed-stack && !use-soft-float
+ // For use-soft-float functions, it is enough to copy just the GPRs.
+ unsigned RegSaveAreaSize =
+ IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
- SystemZRegSaveAreaSize);
+ RegSaveAreaSize);
if (MS.TrackOrigins)
IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
- Alignment, SystemZRegSaveAreaSize);
+ Alignment, RegSaveAreaSize);
}
void copyOverflowArea(IRBuilder<> &IRB, Value *VAListTag) {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/basic-kernel.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/basic-kernel.ll
new file mode 100644
index 0000000000000..5a8db5608adf7
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/basic-kernel.ll
@@ -0,0 +1,169 @@
+; RUN: opt < %s -S -mcpu=z13 -msan-kernel=1 -float-abi=soft -passes=msan 2>&1 | FileCheck %s
+
+target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"
+target triple = "s390x-unknown-linux-gnu"
+
+define void @Store1(ptr %p, i8 %x) sanitize_memory {
+entry:
+ store i8 %x, ptr %p
+ ret void
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Store1(
+; CHECK: [[META_PTR:%[a-z0-9_]+]] = alloca { ptr, ptr }
+; CHECK: call void @__msan_metadata_ptr_for_store_1(ptr [[META_PTR]], ptr %p)
+; CHECK: [[META:%[a-z0-9_]+]] = load { ptr, ptr }, ptr [[META_PTR]]
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: store i8 {{.+}}, ptr [[SHADOW]]
+; CHECK: ret void
+
+define void @Store2(ptr %p, i16 %x) sanitize_memory {
+entry:
+ store i16 %x, ptr %p
+ ret void
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Store2(
+; CHECK: [[META_PTR:%[a-z0-9_]+]] = alloca { ptr, ptr }
+; CHECK: call void @__msan_metadata_ptr_for_store_2(ptr [[META_PTR]], ptr %p)
+; CHECK: [[META:%[a-z0-9_]+]] = load { ptr, ptr }, ptr [[META_PTR]]
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: store i16 {{.+}}, ptr [[SHADOW]]
+; CHECK: ret void
+
+define void @Store4(ptr %p, i32 %x) sanitize_memory {
+entry:
+ store i32 %x, ptr %p
+ ret void
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Store4(
+; CHECK: [[META_PTR:%[a-z0-9_]+]] = alloca { ptr, ptr }
+; CHECK: call void @__msan_metadata_ptr_for_store_4(ptr [[META_PTR]], ptr %p)
+; CHECK: [[META:%[a-z0-9_]+]] = load { ptr, ptr }, ptr [[META_PTR]]
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: store i32 {{.+}}, ptr [[SHADOW]]
+; CHECK: ret void
+
+define void @Store8(ptr %p, i64 %x) sanitize_memory {
+entry:
+ store i64 %x, ptr %p
+ ret void
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Store8(
+; CHECK: [[META_PTR:%[a-z0-9_]+]] = alloca { ptr, ptr }
+; CHECK: call void @__msan_metadata_ptr_for_store_8(ptr [[META_PTR]], ptr %p)
+; CHECK: [[META:%[a-z0-9_]+]] = load { ptr, ptr }, ptr [[META_PTR]]
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: store i64 {{.+}}, ptr [[SHADOW]]
+; CHECK: ret void
+
+define void @Store16(ptr %p, i128 %x) sanitize_memory {
+entry:
+ store i128 %x, ptr %p
+ ret void
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Store16(
+; CHECK: [[META_PTR:%[a-z0-9_]+]] = alloca { ptr, ptr }
+; CHECK: call void @__msan_metadata_ptr_for_store_n(ptr [[META_PTR]], ptr %p, i64 16)
+; CHECK: [[META:%[a-z0-9_]+]] = load { ptr, ptr }, ptr [[META_PTR]]
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: store i128 {{.+}}, ptr [[SHADOW]]
+; CHECK: ret void
+
+define i8 @Load1(ptr %p) sanitize_memory {
+entry:
+ %0 = load i8, ptr %p
+ ret i8 %0
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Load1(
+; CHECK: [[META_PTR:%[a-z0-9_]+]] = alloca { ptr, ptr }
+; CHECK: call void @__msan_metadata_ptr_for_load_1(ptr [[META_PTR]], ptr %p)
+; CHECK: [[META:%[a-z0-9_]+]] = load { ptr, ptr }, ptr [[META_PTR]]
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: [[SHADOW_VAL:%[a-z0-9_]+]] = load i8, ptr [[SHADOW]]
+; CHECK: [[ORIGIN_VAL:%[a-z0-9_]+]] = load i32, ptr [[ORIGIN]]
+; CHECK: store i8 [[SHADOW_VAL]], ptr %retval_shadow
+; CHECK: store i32 [[ORIGIN_VAL]], ptr %retval_origin
+; CHECK: ret i8 {{.+}}
+
+define i16 @Load2(ptr %p) sanitize_memory {
+entry:
+ %0 = load i16, ptr %p
+ ret i16 %0
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Load2(
+; CHECK: [[META_PTR:%[a-z0-9_]+]] = alloca { ptr, ptr }
+; CHECK: call void @__msan_metadata_ptr_for_load_2(ptr [[META_PTR]], ptr %p)
+; CHECK: [[META:%[a-z0-9_]+]] = load { ptr, ptr }, ptr [[META_PTR]]
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: [[SHADOW_VAL:%[a-z0-9_]+]] = load i16, ptr [[SHADOW]]
+; CHECK: [[ORIGIN_VAL:%[a-z0-9_]+]] = load i32, ptr [[ORIGIN]]
+; CHECK: store i16 [[SHADOW_VAL]], ptr %retval_shadow
+; CHECK: store i32 [[ORIGIN_VAL]], ptr %retval_origin
+; CHECK: ret i16 {{.+}}
+
+define i32 @Load4(ptr %p) sanitize_memory {
+entry:
+ %0 = load i32, ptr %p
+ ret i32 %0
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Load4(
+; CHECK: [[META_PTR:%[a-z0-9_]+]] = alloca { ptr, ptr }
+; CHECK: call void @__msan_metadata_ptr_for_load_4(ptr [[META_PTR]], ptr %p)
+; CHECK: [[META:%[a-z0-9_]+]] = load { ptr, ptr }, ptr [[META_PTR]]
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: [[SHADOW_VAL:%[a-z0-9_]+]] = load i32, ptr [[SHADOW]]
+; CHECK: [[ORIGIN_VAL:%[a-z0-9_]+]] = load i32, ptr [[ORIGIN]]
+; CHECK: store i32 [[SHADOW_VAL]], ptr %retval_shadow
+; CHECK: store i32 [[ORIGIN_VAL]], ptr %retval_origin
+; CHECK: ret i32 {{.+}}
+
+define i64 @Load8(ptr %p) sanitize_memory {
+entry:
+ %0 = load i64, ptr %p
+ ret i64 %0
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Load8(
+; CHECK: [[META_PTR:%[a-z0-9_]+]] = alloca { ptr, ptr }
+; CHECK: call void @__msan_metadata_ptr_for_load_8(ptr [[META_PTR]], ptr %p)
+; CHECK: [[META:%[a-z0-9_]+]] = load { ptr, ptr }, ptr [[META_PTR]]
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: [[SHADOW_VAL:%[a-z0-9_]+]] = load i64, ptr [[SHADOW]]
+; CHECK: [[ORIGIN_VAL:%[a-z0-9_]+]] = load i32, ptr [[ORIGIN]]
+; CHECK: store i64 [[SHADOW_VAL]], ptr %retval_shadow
+; CHECK: store i32 [[ORIGIN_VAL]], ptr %retval_origin
+; CHECK: ret i64 {{.+}}
+
+define i128 @Load16(ptr %p) sanitize_memory {
+entry:
+ %0 = load i128, ptr %p
+ ret i128 %0
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Load16(
+; CHECK: [[META_PTR:%[a-z0-9_]+]] = alloca { ptr, ptr }
+; CHECK: call void @__msan_metadata_ptr_for_load_n(ptr [[META_PTR]], ptr %p, i64 16)
+; CHECK: [[META:%[a-z0-9_]+]] = load { ptr, ptr }, ptr [[META_PTR]]
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: [[SHADOW_VAL:%[a-z0-9_]+]] = load i128, ptr [[SHADOW]]
+; CHECK: [[ORIGIN_VAL:%[a-z0-9_]+]] = load i32, ptr [[ORIGIN]]
+; CHECK: store i128 [[SHADOW_VAL]], ptr %retval_shadow
+; CHECK: store i32 [[ORIGIN_VAL]], ptr %retval_origin
+; CHECK: ret i128 {{.+}}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
index de8c9d095e15d..1535fccfc2110 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
@@ -3,9 +3,60 @@
target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"
target triple = "s390x-unknown-linux-gnu"
-declare i64 @foo(i64 %guard, ...) #0
+%struct.__va_list = type { i64, i64, ptr, ptr }
+declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr)
+
+define i64 @foo(i64 %guard, ...) #1 {
+ %vl = alloca %struct.__va_list
+ call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.va_start(ptr %vl)
+ call void @llvm.va_end(ptr %vl)
+ call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ ret i64 0
+}
-attributes #0 = { "target-features"="+soft-float" "use-soft-float"="true" }
+; CHECK-LABEL: define {{[^@]+}}@foo(
+
+; Callers store variadic arguments' shadow and origins into va_arg_shadow and
+; va_arg_origin. Their layout is: the register save area (160 bytes) followed
+; by the overflow arg area. It does not depend on "packed-stack".
+; Check that callees correctly backup shadow into a local variable.
+
+; CHECK: [[TMP:%.*]] = alloca { ptr, ptr }
+; CHECK: [[OverflowSize:%.*]] = load i64, ptr %va_arg_overflow_size
+; CHECK: [[MetaSize:%.*]] = add i64 160, [[OverflowSize]]
+; CHECK: [[ShadowBackup:%.*]] = alloca {{.*}} [[MetaSize]]
+; CHECK: [[MetaCopySize:%.*]] = call i64 @llvm.umin.i64(i64 [[MetaSize]], i64 800)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[ShadowBackup]], ptr align 8 %va_arg_shadow, i64 [[MetaCopySize]], i1 false)
+; CHECK: [[OverflowBackup:%.*]] = alloca {{.*}} [[MetaSize]]
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[OverflowBackup]], ptr align 8 %va_arg_origin, i64 [[MetaCopySize]], i1 false)
+
+; Check that va_start() correctly copies the shadow backup into the shadow of
+; the va_list. Register save area and overflow arg area are copied separately.
+; Only 56 bytes of the register save area is copied, because of
+; "use-soft-float".
+
+; CHECK: call void @llvm.va_start(ptr %vl)
+; CHECK: [[VlAddr:%.*]] = ptrtoint ptr %vl to i64
+; CHECK: [[RegSaveAreaAddrAddr:%.*]] = add i64 [[VlAddr]], 24
+; CHECK: [[RegSaveAreaAddr:%.*]] = inttoptr i64 [[RegSaveAreaAddrAddr]] to ptr
+; CHECK: [[RegSaveArea:%.*]] = load ptr, ptr [[RegSaveAreaAddr]]
+; CHECK: call void @__msan_metadata_ptr_for_store_1(ptr [[TMP]], ptr [[RegSaveArea]])
+; CHECK: [[RegSaveAreaMeta:%.*]] = load { ptr, ptr }, ptr [[TMP]]
+; CHECK: [[RegSaveAreaShadow:%.*]] = extractvalue { ptr, ptr } [[RegSaveAreaMeta]], 0
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[RegSaveAreaShadow]], ptr align 8 [[ShadowBackup]], i64 56, i1 false)
+; CHECK: [[VlAddr:%.*]] = ptrtoint ptr %vl to i64
+; CHECK: [[OverflowAddrAddr:%.*]] = add i64 [[VlAddr]], 16
+; CHECK: [[OverflowAddr:%.*]] = inttoptr i64 [[OverflowAddrAddr]] to ptr
+; CHECK: [[Overflow:%.*]] = load ptr, ptr [[OverflowAddr]]
+; CHECK: call void @__msan_metadata_ptr_for_store_1(ptr [[TMP]], ptr [[Overflow]])
+; CHECK: [[OverflowMeta:%.*]] = load { ptr, ptr }, ptr [[TMP]]
+; CHECK: [[OverflowShadow:%.*]] = extractvalue { ptr, ptr } [[OverflowMeta]], 0
+; CHECK: [[OverflowShadowBackup:%.*]] = getelementptr i8, ptr [[ShadowBackup]], i32 160
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[OverflowShadow]], ptr align 8 [[OverflowShadowBackup]], i64 [[OverflowSize]], i1 false)
declare i32 @random_i32()
declare i64 @random_i64()