[llvm] d9e2fb7 - [msan] Add 32-bit platforms support (#109284)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 14 01:41:17 PST 2024
Author: k-kashapov
Date: 2024-11-14T01:41:13-08:00
New Revision: d9e2fb70d0b72b398fef3106bab0605b5b3e6761
URL: https://github.com/llvm/llvm-project/commit/d9e2fb70d0b72b398fef3106bab0605b5b3e6761
DIFF: https://github.com/llvm/llvm-project/commit/d9e2fb70d0b72b398fef3106bab0605b5b3e6761.diff
LOG: [msan] Add 32-bit platforms support (#109284)
References https://github.com/llvm/llvm-project/issues/103057
Added `VAArgHelper` implementations for the following platforms: ARM32,
i386, RISC-V, PowerPC32, and MIPS32.
ARM32, RISC-V, and MIPS share similar varargs calling conventions, so a
common `VAArgGenericHelper` was introduced to avoid code duplication.
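For context, a minimal sketch (illustrative only, not part of the patch)
of the kind of C code this change lets MSan check on 32-bit targets: at
each call site the shadow of every variadic argument is written to the
va_arg TLS buffer, and at va_start it is copied over the shadow of the
va_list's save area, so uninitialized variadic arguments propagate their
shadow to va_arg results.

#include <stdarg.h>

/* Hypothetical example, not from the patch. With this change, MSan on
 * e.g. arm--linux or mips--linux tracks the shadow of each variadic
 * argument through va_start/va_arg. */
int sum(int count, ...) {
  va_list ap;
  va_start(ap, count);
  int total = 0;
  for (int i = 0; i < count; i++)
    total += va_arg(ap, int); /* if a caller passes an uninitialized int,
                                 its shadow now flows into total; branching
                                 on total would then trigger a report */
  va_end(ap);
  return total;
}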
---------
Co-authored-by: Kamil Kashapov <kashapov at ispras.ru>
Co-authored-by: Vitaly Buka <vitalybuka at google.com>
Added:
llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll
llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll
llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386_bts_asm.ll
llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll
llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll
llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll
llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll
Modified:
llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll
llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll
llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll
llvm/test/Instrumentation/MemorySanitizer/i386/vararg.ll
llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
Removed:
llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-x86.ll
llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-x86.ll
llvm/test/Instrumentation/MemorySanitizer/i386/msan_x86_bts_asm.ll
llvm/test/Instrumentation/MemorySanitizer/i386/msan_x86intrinsics.ll
llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-x86.ll
llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-x86.ll
llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-x86.ll
################################################################################
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 8c2fde8453a74a..dca6bf1adfde8a 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -411,6 +411,10 @@ static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
0x100000000000, // OriginBase
};
+// mips32 Linux
+// FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
+// after picking good constants
+
// mips64 Linux
static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
0, // AndMask (not used)
@@ -419,6 +423,10 @@ static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
0x002000000000, // OriginBase
};
+// ppc32 Linux
+// FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
+// after picking good constants
+
// ppc64 Linux
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
0xE00000000000, // AndMask
@@ -435,6 +443,10 @@ static const MemoryMapParams Linux_S390X_MemoryMapParams = {
0x1C0000000000, // OriginBase
};
+// arm32 Linux
+// FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
+// after picking good constants
+
// aarch64 Linux
static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
0, // AndMask (not used)
@@ -451,6 +463,10 @@ static const MemoryMapParams Linux_LoongArch64_MemoryMapParams = {
0x100000000000, // OriginBase
};
+// riscv32 Linux
+// FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
+// after picking good constants
+
// aarch64 FreeBSD
static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams = {
0x1800000000000, // AndMask
@@ -559,6 +575,7 @@ class MemorySanitizer {
friend struct VarArgAArch64Helper;
friend struct VarArgPowerPCHelper;
friend struct VarArgSystemZHelper;
+ friend struct VarArgI386Helper;
friend struct VarArgGenericHelper;
void initializeModule(Module &M);
@@ -5606,10 +5623,15 @@ struct VarArgPowerPCHelper : public VarArgHelperBase {
// Parameter save area starts at 48 bytes from frame pointer for ABIv1,
// and 32 bytes for ABIv2. This is usually determined by target
// endianness, but in theory could be overridden by function attribute.
- if (TargetTriple.getArch() == Triple::ppc64)
- VAArgBase = 48;
- else
- VAArgBase = 32;
+ if (TargetTriple.isPPC64()) {
+ if (TargetTriple.isPPC64ELFv2ABI())
+ VAArgBase = 32;
+ else
+ VAArgBase = 48;
+ } else {
+ // Parameter save area is 8 bytes from frame pointer in PPC32
+ VAArgBase = 8;
+ }
unsigned VAArgOffset = VAArgBase;
const DataLayout &DL = F.getDataLayout();
for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
@@ -5674,7 +5696,7 @@ struct VarArgPowerPCHelper : public VarArgHelperBase {
}
Constant *TotalVAArgSize =
- ConstantInt::get(IRB.getInt64Ty(), VAArgOffset - VAArgBase);
+ ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
// Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
// a new class member i.e. it is the total size of all VarArgs.
IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
@@ -5705,14 +5727,24 @@ struct VarArgPowerPCHelper : public VarArgHelperBase {
// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
+ Triple TargetTriple(F.getParent()->getTargetTriple());
for (CallInst *OrigInst : VAStartInstrumentationList) {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
- Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
- IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), MS.PtrTy);
+ Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);
+
+ // In PPC32 va_list_tag is a struct, whereas in PPC64 it's a pointer
+ if (!TargetTriple.isPPC64()) {
+ RegSaveAreaPtrPtr =
+ IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
+ }
+ RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
+
Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
- const Align Alignment = Align(8);
+ const DataLayout &DL = F.getDataLayout();
+ unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
+ const Align Alignment = Align(IntptrSize);
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
@@ -6003,8 +6035,118 @@ struct VarArgSystemZHelper : public VarArgHelperBase {
}
};
-/// MIPS-specific implementation of VarArgHelper.
-/// NOTE: This is also used for LoongArch64.
+/// i386-specific implementation of VarArgHelper.
+struct VarArgI386Helper : public VarArgHelperBase {
+ AllocaInst *VAArgTLSCopy = nullptr;
+ Value *VAArgSize = nullptr;
+
+ VarArgI386Helper(Function &F, MemorySanitizer &MS,
+ MemorySanitizerVisitor &MSV)
+ : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}
+
+ void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
+ const DataLayout &DL = F.getDataLayout();
+ unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
+ unsigned VAArgOffset = 0;
+ for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
+ bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
+ bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
+ if (IsByVal) {
+ assert(A->getType()->isPointerTy());
+ Type *RealTy = CB.getParamByValType(ArgNo);
+ uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
+ Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
+ if (ArgAlign < IntptrSize)
+ ArgAlign = Align(IntptrSize);
+ VAArgOffset = alignTo(VAArgOffset, ArgAlign);
+ if (!IsFixed) {
+ Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
+ if (Base) {
+ Value *AShadowPtr, *AOriginPtr;
+ std::tie(AShadowPtr, AOriginPtr) =
+ MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
+ kShadowTLSAlignment, /*isStore*/ false);
+
+ IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
+ kShadowTLSAlignment, ArgSize);
+ }
+ VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
+ }
+ } else {
+ Value *Base;
+ uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
+ Align ArgAlign = Align(IntptrSize);
+ VAArgOffset = alignTo(VAArgOffset, ArgAlign);
+ if (DL.isBigEndian()) {
+ // Adjusting the shadow for argument with size < IntptrSize to match
+ // the placement of bits in big endian system
+ if (ArgSize < IntptrSize)
+ VAArgOffset += (IntptrSize - ArgSize);
+ }
+ if (!IsFixed) {
+ Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
+ if (Base)
+ IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
+ VAArgOffset += ArgSize;
+ VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
+ }
+ }
+ }
+
+ Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
+ // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
+ // a new class member i.e. it is the total size of all VarArgs.
+ IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
+ }
+
+ void finalizeInstrumentation() override {
+ assert(!VAArgSize && !VAArgTLSCopy &&
+ "finalizeInstrumentation called twice");
+ IRBuilder<> IRB(MSV.FnPrologueEnd);
+ VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
+ Value *CopySize = VAArgSize;
+
+ if (!VAStartInstrumentationList.empty()) {
+ // If there is a va_start in this function, make a backup copy of
+ // va_arg_tls somewhere in the function entry block.
+ VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
+ VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
+ IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
+ CopySize, kShadowTLSAlignment, false);
+
+ Value *SrcSize = IRB.CreateBinaryIntrinsic(
+ Intrinsic::umin, CopySize,
+ ConstantInt::get(IRB.getInt64Ty(), kParamTLSSize));
+ IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
+ kShadowTLSAlignment, SrcSize);
+ }
+
+ // Instrument va_start.
+ // Copy va_list shadow from the backup copy of the TLS contents.
+ for (CallInst *OrigInst : VAStartInstrumentationList) {
+ NextNodeIRBuilder IRB(OrigInst);
+ Value *VAListTag = OrigInst->getArgOperand(0);
+ Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
+ Value *RegSaveAreaPtrPtr =
+ IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
+ PointerType::get(RegSaveAreaPtrTy, 0));
+ Value *RegSaveAreaPtr =
+ IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
+ Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
+ const DataLayout &DL = F.getDataLayout();
+ unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
+ const Align Alignment = Align(IntptrSize);
+ std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
+ MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
+ Alignment, /*isStore*/ true);
+ IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
+ CopySize);
+ }
+ }
+};
+
+/// Implementation of VarArgHelper that is used for ARM32, MIPS, RISCV,
+/// LoongArch64.
struct VarArgGenericHelper : public VarArgHelperBase {
AllocaInst *VAArgTLSCopy = nullptr;
Value *VAArgSize = nullptr;
@@ -6016,24 +6158,25 @@ struct VarArgGenericHelper : public VarArgHelperBase {
void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
unsigned VAArgOffset = 0;
const DataLayout &DL = F.getDataLayout();
+ unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
for (Value *A :
llvm::drop_begin(CB.args(), CB.getFunctionType()->getNumParams())) {
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
if (DL.isBigEndian()) {
- // Adjusting the shadow for argument with size < 8 to match the
+ // Adjusting the shadow for argument with size < IntptrSize to match the
// placement of bits in big endian system
- if (ArgSize < 8)
- VAArgOffset += (8 - ArgSize);
+ if (ArgSize < IntptrSize)
+ VAArgOffset += (IntptrSize - ArgSize);
}
Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
VAArgOffset += ArgSize;
- VAArgOffset = alignTo(VAArgOffset, 8);
+ VAArgOffset = alignTo(VAArgOffset, IntptrSize);
if (!Base)
continue;
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
- Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
+ Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
// Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
// a new class member i.e. it is the total size of all VarArgs.
IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
@@ -6066,11 +6209,16 @@ struct VarArgGenericHelper : public VarArgHelperBase {
for (CallInst *OrigInst : VAStartInstrumentationList) {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
- Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
- IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), MS.PtrTy);
- Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
+ Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
+ Value *RegSaveAreaPtrPtr =
+ IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
+ PointerType::get(RegSaveAreaPtrTy, 0));
+ Value *RegSaveAreaPtr =
+ IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
- const Align Alignment = Align(8);
+ const DataLayout &DL = F.getDataLayout();
+ unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
+ const Align Alignment = Align(IntptrSize);
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
@@ -6080,8 +6228,10 @@ struct VarArgGenericHelper : public VarArgHelperBase {
}
};
-// Loongarch64 is not a MIPS, but the current vargs calling convention matches
-// the MIPS.
+// ARM32, Loongarch64, MIPS and RISCV share the same calling conventions
+// regarding VAArgs.
+using VarArgARM32Helper = VarArgGenericHelper;
+using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;
@@ -6106,18 +6256,39 @@ static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
// VarArg handling is only implemented on AMD64. False positives are possible
// on other platforms.
Triple TargetTriple(Func.getParent()->getTargetTriple());
+
+ if (TargetTriple.getArch() == Triple::x86)
+ return new VarArgI386Helper(Func, Msan, Visitor);
+
if (TargetTriple.getArch() == Triple::x86_64)
return new VarArgAMD64Helper(Func, Msan, Visitor);
+ if (TargetTriple.isARM())
+ return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);
+
if (TargetTriple.isAArch64())
return new VarArgAArch64Helper(Func, Msan, Visitor);
if (TargetTriple.isSystemZ())
return new VarArgSystemZHelper(Func, Msan, Visitor);
+ // On PowerPC32 VAListTag is a struct
+ // {char, char, i16 padding, char *, char *}
+ if (TargetTriple.isPPC32())
+ return new VarArgPowerPCHelper(Func, Msan, Visitor, /*VAListTagSize=*/12);
+
if (TargetTriple.isPPC64())
return new VarArgPowerPCHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
+ if (TargetTriple.isRISCV32())
+ return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
+
+ if (TargetTriple.isRISCV64())
+ return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
+
+ if (TargetTriple.isMIPS32())
+ return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);
+
if (TargetTriple.isMIPS64())
return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);
diff --git a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
index 3eccfb3170e242..52f4901298611a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
+; RUN: opt < %s -S -passes=msan -msan-origin-base=0x40000000 -msan-and-mask=0x80000000 2>&1 | FileCheck %s
target datalayout = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
-target triple = "mips64--linux"
+target triple = "arm--linux"
define i32 @foo(i32 %guard, ...) {
; CHECK-LABEL: define i32 @foo(
@@ -15,20 +15,20 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 549755813888
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 549755813888
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 549755813888
+; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP14]], -2147483649
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
index 3eccfb3170e242..05a88f0c252a08 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
+; RUN: opt < %s -S -passes=msan -msan-origin-base=0x40000000 -msan-and-mask=0x80000000 2>&1 | FileCheck %s
target datalayout = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
-target triple = "mips64--linux"
+target triple = "mips--linux"
define i32 @foo(i32 %guard, ...) {
; CHECK-LABEL: define i32 @foo(
@@ -15,20 +15,20 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 549755813888
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 549755813888
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 549755813888
+; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP14]], -2147483649
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
index 9dc4f70e9078f2..971b25febe0e28 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
+; RUN: opt < %s -S -passes=msan -msan-origin-base=0x40000000 -msan-and-mask=0x80000000 2>&1 | FileCheck %s
target datalayout = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
-target triple = "mips64el--linux"
+target triple = "mipsel--linux"
define i32 @foo(i32 %guard, ...) {
; CHECK-LABEL: define i32 @foo(
@@ -15,20 +15,20 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 549755813888
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 549755813888
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 549755813888
+; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP14]], -2147483649
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll
index e7c78f12a17145..bbf6842cd82c9e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -S -msan-kernel=1 -passes=msan 2>&1 | FileCheck %s
+; RUN: opt < %s -S -msan-kernel=1 -passes=msan -msan-origin-base=0x40000000 -msan-and-mask=0x80000000 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-n32:64"
-target triple = "powerpc64le--linux"
+target triple = "powerpcle--linux"
define void @Store1(ptr %p, i8 %x) sanitize_memory {
; CHECK-LABEL: define void @Store1(
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
index 50343106fc2ad2..d05b11f3f050dd 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
+; RUN: opt < %s -S -passes=msan -msan-origin-base=0x40000000 -msan-and-mask=0x80000000 2>&1 | FileCheck %s
target datalayout = "E-m:e-i64:64-n32:64"
-target triple = "powerpc64--linux"
+target triple = "powerpc--linux"
define i32 @foo(i32 %guard, ...) {
; CHECK-LABEL: define i32 @foo(
@@ -15,28 +15,23 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -246290604621825
-; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 17592186044416
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], 8796093022208
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false)
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP10]], -246290604621825
-; CHECK-NEXT: [[TMP12:%.*]] = xor i64 [[TMP11]], 17592186044416
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], 8796093022208
-; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP14]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP18]], -2147483649
+; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP19]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 12, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
-; CHECK-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64
-; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP18]], -246290604621825
-; CHECK-NEXT: [[TMP20:%.*]] = xor i64 [[TMP19]], 17592186044416
-; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[TMP20]], 8796093022208
-; CHECK-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP22]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], 8
+; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8
+; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = and i64 [[TMP15]], -2147483649
+; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP17]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -93,8 +88,8 @@ define i32 @bar2() {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
@@ -136,8 +131,8 @@ define i32 @bar5() {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
@@ -157,17 +152,13 @@ define i32 @bar6(ptr %arg) {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -246290604621825
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 17592186044416
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], 8796093022208
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i8 0, i64 16, i1 false)
-; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -246290604621825
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 17592186044416
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 8796093022208
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP12]], i64 16, i1 false)
+; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -2147483649
+; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP8]], i64 16, i1 false)
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 [[ARG]])
@@ -188,18 +179,14 @@ define i32 @bar7(ptr %arg) {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -246290604621825
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 17592186044416
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], 8796093022208
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i8 0, i64 32, i1 false)
-; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -246290604621825
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 17592186044416
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 8796093022208
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), ptr align 8 [[TMP12]], i64 32, i1 false)
-; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -2147483649
+; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP8]], i64 32, i1 false)
+; CHECK-NEXT: store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 [[ARG]])
; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
index aabb3b859cdf55..54069b71be83ff 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
+; RUN: opt < %s -S -passes=msan -msan-origin-base=0x40000000 -msan-and-mask=0x80000000 2>&1 | FileCheck %s
target datalayout = "e-m:e-i64:64-n32:64"
-target triple = "powerpc64le--linux"
+target triple = "powerpcle--linux"
define i32 @foo(i32 %guard, ...) {
; CHECK-LABEL: define i32 @foo(
@@ -15,28 +15,23 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -246290604621825
-; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 17592186044416
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], 8796093022208
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false)
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
-; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP10]], -246290604621825
-; CHECK-NEXT: [[TMP12:%.*]] = xor i64 [[TMP11]], 17592186044416
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], 8796093022208
-; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP14]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP18]], -2147483649
+; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP19]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 12, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
-; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
-; CHECK-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64
-; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP18]], -246290604621825
-; CHECK-NEXT: [[TMP20:%.*]] = xor i64 [[TMP19]], 17592186044416
-; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[TMP20]], 8796093022208
-; CHECK-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP22]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[VL]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], 8
+; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8
+; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = and i64 [[TMP15]], -2147483649
+; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP17]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
@@ -92,8 +87,8 @@ define i32 @bar2() {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
@@ -135,8 +130,8 @@ define i32 @bar5() {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store [2 x i128] zeroinitializer, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
@@ -156,17 +151,13 @@ define i32 @bar6(ptr %arg) {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -246290604621825
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 17592186044416
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], 8796093022208
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i8 0, i64 16, i1 false)
-; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -246290604621825
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 17592186044416
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 8796093022208
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP12]], i64 16, i1 false)
+; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -2147483649
+; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP8]], i64 16, i1 false)
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 [[ARG]])
@@ -187,18 +178,14 @@ define i32 @bar7(ptr %arg) {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -246290604621825
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 17592186044416
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], 8796093022208
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i8 0, i64 32, i1 false)
-; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -246290604621825
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 17592186044416
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 8796093022208
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), ptr align 8 [[TMP12]], i64 32, i1 false)
-; CHECK-NEXT: store i64 40, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -2147483649
+; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP8]], i64 32, i1 false)
+; CHECK-NEXT: store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[TMP13:%.*]] = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 [[ARG]])
; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
diff --git a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
index 3eccfb3170e242..50e7be13049eb1 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
+; RUN: opt < %s -S -passes=msan -msan-origin-base=0x40000000 -msan-and-mask=0x80000000 2>&1 | FileCheck %s
target datalayout = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
-target triple = "mips64--linux"
+target triple = "riscv32--linux"
define i32 @foo(i32 %guard, ...) {
; CHECK-LABEL: define i32 @foo(
@@ -15,20 +15,20 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 549755813888
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 549755813888
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 8, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 549755813888
+; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP14]], -2147483649
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll
similarity index 89%
rename from llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-x86.ll
rename to llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll
index 5bb2c3f0e233b9..bd3b9388eadff7 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-i386.ll
@@ -2,12 +2,13 @@
; RUN: opt %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+target triple = "i386-unknown-linux-gnu"
define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_addsub_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
@@ -24,6 +25,7 @@ define <8 x float> @test_x86_avx_addsub_ps_256(<8 x float> %a0, <8 x float> %a1)
; CHECK-LABEL: @test_x86_avx_addsub_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
@@ -38,21 +40,22 @@ declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwi
define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_blendv_pd_256(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x double> [[A2:%.*]] to <4 x i64>
-; CHECK-NEXT: [[TMP5:%.*]] = ashr <4 x i64> [[TMP4]], splat (i64 63)
+; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x double> [[A2:%.*]] to <4 x i64>
+; CHECK-NEXT: [[TMP5:%.*]] = ashr <4 x i64> [[TMP13]], splat (i64 63)
; CHECK-NEXT: [[TMP6:%.*]] = trunc <4 x i64> [[TMP5]] to <4 x i1>
-; CHECK-NEXT: [[TMP7:%.*]] = ashr <4 x i64> [[TMP1]], splat (i64 63)
+; CHECK-NEXT: [[TMP7:%.*]] = ashr <4 x i64> [[TMP4]], splat (i64 63)
; CHECK-NEXT: [[TMP8:%.*]] = trunc <4 x i64> [[TMP7]] to <4 x i1>
-; CHECK-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP6]], <4 x i64> [[TMP2]], <4 x i64> [[TMP3]]
+; CHECK-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP6]], <4 x i64> [[TMP2]], <4 x i64> [[TMP12]]
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <4 x double> [[A1:%.*]] to <4 x i64>
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <4 x double> [[A0:%.*]] to <4 x i64>
-; CHECK-NEXT: [[TMP12:%.*]] = xor <4 x i64> [[TMP10]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = or <4 x i64> [[TMP12]], [[TMP2]]
-; CHECK-NEXT: [[TMP14:%.*]] = or <4 x i64> [[TMP13]], [[TMP3]]
+; CHECK-NEXT: [[TMP3:%.*]] = xor <4 x i64> [[TMP10]], [[TMP11]]
+; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP14:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP12]]
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <4 x i1> [[TMP8]], <4 x i64> [[TMP14]], <4 x i64> [[TMP9]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> [[A0]], <4 x double> [[A1]], <4 x double> [[A2]])
; CHECK-NEXT: store <4 x i64> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
@@ -66,21 +69,22 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4
define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
; CHECK-LABEL: @test_x86_avx_blendv_ps_256(
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x float> [[A2:%.*]] to <8 x i32>
-; CHECK-NEXT: [[TMP5:%.*]] = ashr <8 x i32> [[TMP4]], splat (i32 31)
+; CHECK-NEXT: [[TMP13:%.*]] = bitcast <8 x float> [[A2:%.*]] to <8 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = ashr <8 x i32> [[TMP13]], splat (i32 31)
; CHECK-NEXT: [[TMP6:%.*]] = trunc <8 x i32> [[TMP5]] to <8 x i1>
-; CHECK-NEXT: [[TMP7:%.*]] = ashr <8 x i32> [[TMP1]], splat (i32 31)
+; CHECK-NEXT: [[TMP7:%.*]] = ashr <8 x i32> [[TMP4]], splat (i32 31)
; CHECK-NEXT: [[TMP8:%.*]] = trunc <8 x i32> [[TMP7]] to <8 x i1>
-; CHECK-NEXT: [[TMP9:%.*]] = select <8 x i1> [[TMP6]], <8 x i32> [[TMP2]], <8 x i32> [[TMP3]]
+; CHECK-NEXT: [[TMP9:%.*]] = select <8 x i1> [[TMP6]], <8 x i32> [[TMP2]], <8 x i32> [[TMP12]]
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <8 x float> [[A1:%.*]] to <8 x i32>
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <8 x float> [[A0:%.*]] to <8 x i32>
-; CHECK-NEXT: [[TMP12:%.*]] = xor <8 x i32> [[TMP10]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = or <8 x i32> [[TMP12]], [[TMP2]]
-; CHECK-NEXT: [[TMP14:%.*]] = or <8 x i32> [[TMP13]], [[TMP3]]
+; CHECK-NEXT: [[TMP3:%.*]] = xor <8 x i32> [[TMP10]], [[TMP11]]
+; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP14:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP12]]
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <8 x i1> [[TMP8]], <8 x i32> [[TMP14]], <8 x i32> [[TMP9]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> [[A0]], <8 x float> [[A1]], <8 x float> [[A2]])
; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
@@ -96,6 +100,7 @@ define <4 x double> @test_x86_avx_cmp_pd_256(<4 x double> %a0, <4 x double> %a1)
; CHECK-LABEL: @test_x86_avx_cmp_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -114,6 +119,7 @@ define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) #0
; CHECK-LABEL: @test_x86_avx_cmp_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
@@ -130,6 +136,7 @@ define <8 x float> @test_x86_avx_cmp_ps_256_pseudo_op(<8 x float> %a0, <8 x floa
; CHECK-LABEL: @test_x86_avx_cmp_ps_256_pseudo_op(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP99:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
@@ -302,14 +309,15 @@ declare <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float>, <8 x float>, i8) nounw
define <4 x float> @test_x86_avx_cvt_pd2_ps_256(<4 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_cvt_pd2_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -323,14 +331,15 @@ declare <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double>) nounwind readnone
define <4 x i32> @test_x86_avx_cvt_pd2dq_256(<4 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_cvt_pd2dq_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
@@ -344,14 +353,15 @@ declare <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double>) nounwind readnone
define <8 x i32> @test_x86_avx_cvt_ps2dq_256(<8 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_cvt_ps2dq_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> [[A0:%.*]])
; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
@@ -365,14 +375,15 @@ declare <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float>) nounwind readnone
define <4 x i32> @test_x86_avx_cvtt_pd2dq_256(<4 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_cvtt_pd2dq_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
@@ -386,14 +397,15 @@ declare <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double>) nounwind readnone
define <8 x i32> @test_x86_avx_cvtt_ps2dq_256(<8 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_cvtt_ps2dq_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> [[A0:%.*]])
; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
@@ -408,6 +420,7 @@ define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) #0
; CHECK-LABEL: @test_x86_avx_dp_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> <i1 false, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i32> [[TMP3]], <8 x i32> zeroinitializer
@@ -434,6 +447,7 @@ define <4 x double> @test_x86_avx_hadd_pd_256(<4 x double> %a0, <4 x double> %a1
; CHECK-LABEL: @test_x86_avx_hadd_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
@@ -450,6 +464,7 @@ define <8 x float> @test_x86_avx_hadd_ps_256(<8 x float> %a0, <8 x float> %a1) #
; CHECK-LABEL: @test_x86_avx_hadd_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
@@ -466,6 +481,7 @@ define <4 x double> @test_x86_avx_hsub_pd_256(<4 x double> %a0, <4 x double> %a1
; CHECK-LABEL: @test_x86_avx_hsub_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
@@ -482,6 +498,7 @@ define <8 x float> @test_x86_avx_hsub_ps_256(<8 x float> %a0, <8 x float> %a1) #
; CHECK-LABEL: @test_x86_avx_hsub_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
@@ -497,17 +514,18 @@ declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>) nounwind
define <32 x i8> @test_x86_avx_ldu_dq_256(ptr %a0) #0 {
; CHECK-LABEL: @test_x86_avx_ldu_dq_256(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -2147483649
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP4]], align 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx.ldu.dq.256(ptr [[A0]])
; CHECK-NEXT: store <32 x i8> [[_MSLD]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <32 x i8> [[RES]]
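(The xor-to-and change above is the other systematic difference in these files: 87960930222080 is 0x500000000000, the x86_64 Linux shadow XorMask, while the i386 run instead masks with -2147483649, i.e. 0xFFFFFFFF7FFFFFFF, which keeps every bit except bit 31 — the complement of an 0x80000000 AndMask. A standalone sketch of that mapping; the helper name is an illustrative assumption, not code from the patch:

define ptr @shadow_for(ptr %app) {
entry:
  %a = ptrtoint ptr %app to i64
  ; -2147483649 == 0xFFFFFFFF7FFFFFFF: clear bit 31, keep the rest
  %s = and i64 %a, -2147483649
  %p = inttoptr i64 %s to ptr
  ret ptr %p
}

Every "xor i64 ..., 87960930222080" to "and i64 ..., -2147483649" replacement below follows this one pattern.)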
@@ -522,16 +540,17 @@ define <2 x double> @test_x86_avx_maskload_pd(ptr %a0, <2 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP3]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
-; CHECK: 4:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
+; CHECK: 5:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 5:
+; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.avx.maskload.pd(ptr [[A0:%.*]], <2 x i64> [[MASK:%.*]])
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x double> [[RES]]
@@ -546,16 +565,17 @@ define <4 x double> @test_x86_avx_maskload_pd_256(ptr %a0, <4 x i64> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i256 [[TMP3]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
-; CHECK: 4:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
+; CHECK: 5:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 5:
+; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.maskload.pd.256(ptr [[A0:%.*]], <4 x i64> [[MASK:%.*]])
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x double> [[RES]]
@@ -570,16 +590,17 @@ define <4 x float> @test_x86_avx_maskload_ps(ptr %a0, <4 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP3]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
-; CHECK: 4:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
+; CHECK: 5:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 5:
+; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx.maskload.ps(ptr [[A0:%.*]], <4 x i32> [[MASK:%.*]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -594,16 +615,17 @@ define <8 x float> @test_x86_avx_maskload_ps_256(ptr %a0, <8 x i32> %mask) #0 {
; CHECK-LABEL: @test_x86_avx_maskload_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i256 [[TMP3]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
-; CHECK: 4:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
+; CHECK: 5:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 5:
+; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.maskload.ps.256(ptr [[A0:%.*]], <8 x i32> [[MASK:%.*]])
; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x float> [[RES]]
@@ -619,6 +641,7 @@ define void @test_x86_avx_maskstore_pd(ptr %a0, <2 x i64> %mask, <2 x double> %a
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -627,11 +650,11 @@ define void @test_x86_avx_maskstore_pd(ptr %a0, <2 x i64> %mask, <2 x double> %a
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i128 [[TMP5]], 0
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
-; CHECK: 6:
+; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 7:
+; CHECK: 8:
; CHECK-NEXT: call void @llvm.x86.avx.maskstore.pd(ptr [[A0:%.*]], <2 x i64> [[MASK:%.*]], <2 x double> [[A2:%.*]])
; CHECK-NEXT: ret void
;
@@ -646,6 +669,7 @@ define void @test_x86_avx_maskstore_pd_256(ptr %a0, <4 x i64> %mask, <4 x double
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -654,11 +678,11 @@ define void @test_x86_avx_maskstore_pd_256(ptr %a0, <4 x i64> %mask, <4 x double
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP3]] to i256
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i256 [[TMP5]], 0
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
-; CHECK: 6:
+; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 7:
+; CHECK: 8:
; CHECK-NEXT: call void @llvm.x86.avx.maskstore.pd.256(ptr [[A0:%.*]], <4 x i64> [[MASK:%.*]], <4 x double> [[A2:%.*]])
; CHECK-NEXT: ret void
;
@@ -673,6 +697,7 @@ define void @test_x86_avx_maskstore_ps(ptr %a0, <4 x i32> %mask, <4 x float> %a2
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -681,11 +706,11 @@ define void @test_x86_avx_maskstore_ps(ptr %a0, <4 x i32> %mask, <4 x float> %a2
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i128 [[TMP5]], 0
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
-; CHECK: 6:
+; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 7:
+; CHECK: 8:
; CHECK-NEXT: call void @llvm.x86.avx.maskstore.ps(ptr [[A0:%.*]], <4 x i32> [[MASK:%.*]], <4 x float> [[A2:%.*]])
; CHECK-NEXT: ret void
;
@@ -700,6 +725,7 @@ define void @test_x86_avx_maskstore_ps_256(ptr %a0, <8 x i32> %mask, <8 x float>
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -708,11 +734,11 @@ define void @test_x86_avx_maskstore_ps_256(ptr %a0, <8 x i32> %mask, <8 x float>
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP3]] to i256
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i256 [[TMP5]], 0
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
-; CHECK: 6:
+; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 7:
+; CHECK: 8:
; CHECK-NEXT: call void @llvm.x86.avx.maskstore.ps.256(ptr [[A0:%.*]], <8 x i32> [[MASK:%.*]], <8 x float> [[A2:%.*]])
; CHECK-NEXT: ret void
;
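(For the maskload/maskstore family above, every operand is checked strictly: each operand's shadow is compared against zero, the comparisons are OR-ed together into [[_MSOR3]], and a profile-weighted branch routes to __msan_warning_noreturn(). A minimal standalone sketch of that check shape; the branch weights in !0 are an assumption about how MSan marks the warning edge as unlikely, not values copied from the patch:

declare void @__msan_warning_noreturn()

define void @check(i64 %combined_shadow) {
entry:
  %_mscmp = icmp ne i64 %combined_shadow, 0
  br i1 %_mscmp, label %warn, label %cont, !prof !0

warn:                      ; the "7:" blocks in the CHECK lines above
  call void @__msan_warning_noreturn()
  unreachable

cont:
  ret void
}

!0 = !{!"branch_weights", i32 1, i32 1048575}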
@@ -726,6 +752,7 @@ define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x double> %a1)
; CHECK-LABEL: @test_x86_avx_max_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
@@ -742,6 +769,7 @@ define <8 x float> @test_x86_avx_max_ps_256(<8 x float> %a0, <8 x float> %a1) #0
; CHECK-LABEL: @test_x86_avx_max_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
@@ -758,6 +786,7 @@ define <4 x double> @test_x86_avx_min_pd_256(<4 x double> %a0, <4 x double> %a1)
; CHECK-LABEL: @test_x86_avx_min_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
@@ -774,6 +803,7 @@ define <8 x float> @test_x86_avx_min_ps_256(<8 x float> %a0, <8 x float> %a1) #0
; CHECK-LABEL: @test_x86_avx_min_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
@@ -789,14 +819,15 @@ declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind
define i32 @test_x86_avx_movmsk_pd_256(<4 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_movmsk_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> [[A0:%.*]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[RES]]
@@ -810,14 +841,15 @@ declare i32 @llvm.x86.avx.movmsk.pd.256(<4 x double>) nounwind readnone
define i32 @test_x86_avx_movmsk_ps_256(<8 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_movmsk_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> [[A0:%.*]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[RES]]
@@ -832,6 +864,7 @@ define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_ptestc_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -851,6 +884,7 @@ define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_ptestnzc_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -870,6 +904,7 @@ define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_ptestz_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -888,6 +923,7 @@ declare i32 @llvm.x86.avx.ptestz.256(<4 x i64>, <4 x i64>) nounwind readnone
define <8 x float> @test_x86_avx_rcp_ps_256(<8 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_rcp_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> [[A0:%.*]])
; CHECK-NEXT: store <8 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
@@ -902,14 +938,15 @@ declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>) nounwind readnone
define <4 x double> @test_x86_avx_round_pd_256(<4 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_round_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> [[A0:%.*]], i32 7)
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x double> [[RES]]
@@ -923,14 +960,15 @@ declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind read
define <8 x float> @test_x86_avx_round_ps_256(<8 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_round_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> [[A0:%.*]], i32 7)
; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x float> [[RES]]
@@ -944,6 +982,7 @@ declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readno
define <8 x float> @test_x86_avx_rsqrt_ps_256(<8 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_rsqrt_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> [[A0:%.*]])
; CHECK-NEXT: store <8 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
@@ -958,17 +997,18 @@ define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1)
; CHECK-LABEL: @test_x86_avx_vpermilvar_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> [[A0:%.*]], <2 x i64> [[A1:%.*]])
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x double> [[RES]]
@@ -983,17 +1023,18 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64>
; CHECK-LABEL: @test_x86_avx_vpermilvar_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i256 [[TMP4]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> [[A0:%.*]], <4 x i64> [[A1:%.*]])
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x double> [[RES]]
@@ -1006,14 +1047,15 @@ declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>) no
define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_avx_vpermilvar_pd_256_2(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> [[A0:%.*]], <4 x i64> <i64 2, i64 0, i64 0, i64 2>)
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x double> [[RES]]
@@ -1026,17 +1068,18 @@ define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) #
; CHECK-LABEL: @test_x86_avx_vpermilvar_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> [[A0:%.*]], <4 x i32> [[A1:%.*]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -1048,16 +1091,17 @@ define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, ptr %a1) #0
; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[A2:%.*]] = load <4 x i32>, ptr [[A1:%.*]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A1]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP7]], align 16
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -1065,11 +1109,11 @@ define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, ptr %a1) #0
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x i32> [[_MSLD]] to i128
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i128 [[TMP9]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP1]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF1]]
-; CHECK: 10:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
+; CHECK: 11:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 11:
+; CHECK: 12:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> [[A0:%.*]], <4 x i32> [[A2]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -1085,17 +1129,18 @@ define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a
; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i256 [[TMP4]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> [[A0:%.*]], <8 x i32> [[A1:%.*]])
; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x float> [[RES]]
@@ -1110,6 +1155,7 @@ define i32 @test_x86_avx_vtestc_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -1129,6 +1175,7 @@ define i32 @test_x86_avx_vtestc_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -1148,6 +1195,7 @@ define i32 @test_x86_avx_vtestc_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
@@ -1167,6 +1215,7 @@ define i32 @test_x86_avx_vtestc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestc_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
@@ -1186,6 +1235,7 @@ define i32 @test_x86_avx_vtestnzc_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestnzc_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -1205,6 +1255,7 @@ define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) #0
; CHECK-LABEL: @test_x86_avx_vtestnzc_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -1224,6 +1275,7 @@ define i32 @test_x86_avx_vtestnzc_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestnzc_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
@@ -1243,6 +1295,7 @@ define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestnzc_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
@@ -1262,6 +1315,7 @@ define i32 @test_x86_avx_vtestz_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -1281,6 +1335,7 @@ define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_pd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
@@ -1300,6 +1355,7 @@ define i32 @test_x86_avx_vtestz_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
@@ -1319,6 +1375,7 @@ define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_avx_vtestz_ps_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
@@ -1336,6 +1393,7 @@ declare i32 @llvm.x86.avx.vtestz.ps.256(<8 x float>, <8 x float>) nounwind readn
define void @test_x86_avx_vzeroall() #0 {
; CHECK-LABEL: @test_x86_avx_vzeroall(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: call void @llvm.x86.avx.vzeroall()
; CHECK-NEXT: ret void
@@ -1348,6 +1406,7 @@ declare void @llvm.x86.avx.vzeroall() nounwind
define void @test_x86_avx_vzeroupper() #0 {
; CHECK-LABEL: @test_x86_avx_vzeroupper(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: call void @llvm.x86.avx.vzeroupper()
; CHECK-NEXT: ret void
@@ -1361,19 +1420,20 @@ define void @movnt_dq(ptr %p, <2 x i64> %a1) nounwind #0 {
; CHECK-LABEL: @movnt_dq(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A2:%.*]] = add <2 x i64> [[A1:%.*]], splat (i64 1)
; CHECK-NEXT: [[_MSPROP1:%.*]] = shufflevector <2 x i64> [[_MSPROP]], <2 x i64> splat (i64 -1), <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; CHECK-NEXT: [[A3:%.*]] = shufflevector <2 x i64> [[A2]], <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P:%.*]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store <4 x i64> [[_MSPROP1]], ptr [[TMP7]], align 32
; CHECK-NEXT: store <4 x i64> [[A3]], ptr [[P]], align 32, !nontemporal [[META2:![0-9]+]]
@@ -1389,16 +1449,17 @@ declare void @llvm.x86.avx.movnt.dq.256(ptr, <4 x i64>) nounwind
define void @movnt_ps(ptr %p, <8 x float> %a) nounwind #0 {
; CHECK-LABEL: @movnt_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P:%.*]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store <8 x i32> [[TMP2]], ptr [[TMP7]], align 32
; CHECK-NEXT: store <8 x float> [[A:%.*]], ptr [[P]], align 32, !nontemporal [[META2]]
@@ -1414,17 +1475,18 @@ define void @movnt_pd(ptr %p, <4 x double> %a1) nounwind #0 {
; CHECK-LABEL: @movnt_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[A2:%.*]] = fadd <4 x double> [[A1:%.*]], zeroinitializer
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P:%.*]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store <4 x i64> [[_MSPROP]], ptr [[TMP7]], align 32
; CHECK-NEXT: store <4 x double> [[A2]], ptr [[P]], align 32, !nontemporal [[META2]]
@@ -1441,6 +1503,7 @@ define <2 x i64> @test_x86_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_pclmulqdq(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> poison, <2 x i32> zeroinitializer
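(One more convention worth decoding before the next file: argument shadows are read from @__msan_param_tls at each parameter's 8-byte-aligned byte offset, which is why a second 32-byte vector argument is loaded through the "add ... i64 32" constant expression seen throughout. A hypothetical illustration, assuming the standard [100 x i64] TLS declaration; the function is not from the patch:

@__msan_param_tls = external thread_local global [100 x i64]

define <8 x i32> @second_arg_shadow() {
entry:
  ; first <8 x i32> argument occupies bytes 0..31, so the second starts at 32
  %s = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
  ret <8 x i32> %s
}

The avx2 test diff below shows the same three patterns — param TLS offsets, the extra va_arg size load, and the renumbered CHECK labels — under the renamed i386 file.)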
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll
similarity index 90%
rename from llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-x86.ll
rename to llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll
index 1602e85d8516d2..d6ff88d37427f6 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-i386.ll
@@ -2,12 +2,13 @@
; RUN: opt %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+target triple = "i386-unknown-linux-gnu"
define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packssdw(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -26,6 +27,7 @@ declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readno
define <16 x i16> @test_x86_avx2_packssdw_fold() #0 {
; CHECK-LABEL: @test_x86_avx2_packssdw_fold(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> zeroinitializer)
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
@@ -41,6 +43,7 @@ define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packsswb(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i16>
@@ -59,6 +62,7 @@ declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_x86_avx2_packsswb_fold() #0 {
; CHECK-LABEL: @test_x86_avx2_packsswb_fold(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP_VECTOR_PACK:%.*]] = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> zeroinitializer, <16 x i16> zeroinitializer)
; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> <i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
@@ -74,6 +78,7 @@ define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packuswb(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i16>
@@ -92,6 +97,7 @@ declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readn
define <32 x i8> @test_x86_avx2_packuswb_fold() #0 {
; CHECK-LABEL: @test_x86_avx2_packuswb_fold(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP_VECTOR_PACK:%.*]] = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> zeroinitializer, <16 x i16> zeroinitializer)
; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> <i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
@@ -107,6 +113,7 @@ define <32 x i8> @test_x86_avx2_pavg_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pavg_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]])
@@ -123,6 +130,7 @@ define <16 x i16> @test_x86_avx2_pavg_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pavg_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -139,6 +147,7 @@ define <8 x i32> @test_x86_avx2_pmadd_wd(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmadd_wd(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP3]] to <8 x i32>
@@ -157,14 +166,15 @@ declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readn
define i32 @test_x86_avx2_pmovmskb(<32 x i8> %a0) #0 {
; CHECK-LABEL: @test_x86_avx2_pmovmskb(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <32 x i8> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6:[0-9]+]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> [[A0:%.*]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[RES]]
@@ -179,6 +189,7 @@ define <16 x i16> @test_x86_avx2_pmulh_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmulh_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -195,6 +206,7 @@ define <16 x i16> @test_x86_avx2_pmulhu_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmulhu_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -211,6 +223,7 @@ define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psad_bw(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i8> [[TMP3]] to <4 x i64>
@@ -231,6 +244,7 @@ define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psll_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -253,6 +267,7 @@ define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psll_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -275,6 +290,7 @@ define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psll_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -296,6 +312,7 @@ declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnon
define <8 x i32> @test_x86_avx2_pslli_d(<8 x i32> %a0) #0 {
; CHECK-LABEL: @test_x86_avx2_pslli_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP2]], zeroinitializer
@@ -312,6 +329,7 @@ declare <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32>, i32) nounwind readnone
define <4 x i64> @test_x86_avx2_pslli_q(<4 x i64> %a0) #0 {
; CHECK-LABEL: @test_x86_avx2_pslli_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP2]], zeroinitializer
@@ -328,6 +346,7 @@ declare <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64>, i32) nounwind readnone
define <16 x i16> @test_x86_avx2_pslli_w(<16 x i16> %a0) #0 {
; CHECK-LABEL: @test_x86_avx2_pslli_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i16> [[TMP2]], zeroinitializer
@@ -345,6 +364,7 @@ define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psra_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -367,6 +387,7 @@ define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psra_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -388,6 +409,7 @@ declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnon
define <8 x i32> @test_x86_avx2_psrai_d(<8 x i32> %a0) #0 {
; CHECK-LABEL: @test_x86_avx2_psrai_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP2]], zeroinitializer
@@ -404,6 +426,7 @@ declare <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32>, i32) nounwind readnone
define <16 x i16> @test_x86_avx2_psrai_w(<16 x i16> %a0) #0 {
; CHECK-LABEL: @test_x86_avx2_psrai_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i16> [[TMP2]], zeroinitializer
@@ -421,6 +444,7 @@ define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -443,6 +467,7 @@ define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -465,6 +490,7 @@ define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -487,16 +513,17 @@ define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, ptr %p) #0 {
; CHECK-LABEL: @test_x86_avx2_psrl_w_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP15:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[A1:%.*]] = load <8 x i16>, ptr [[P:%.*]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i16>, ptr [[TMP7]], align 16
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i16> [[_MSLD]] to i128
@@ -519,6 +546,7 @@ define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, ptr %p) #0 {
define <8 x i32> @test_x86_avx2_psrli_d(<8 x i32> %a0) #0 {
; CHECK-LABEL: @test_x86_avx2_psrli_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i32> [[TMP2]], zeroinitializer
@@ -535,6 +563,7 @@ declare <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32>, i32) nounwind readnone
define <4 x i64> @test_x86_avx2_psrli_q(<4 x i64> %a0) #0 {
; CHECK-LABEL: @test_x86_avx2_psrli_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[TMP2]], zeroinitializer
@@ -551,6 +580,7 @@ declare <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64>, i32) nounwind readnone
define <16 x i16> @test_x86_avx2_psrli_w(<16 x i16> %a0) #0 {
; CHECK-LABEL: @test_x86_avx2_psrli_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i16> [[TMP2]], zeroinitializer
@@ -568,6 +598,7 @@ define <8 x i32> @test_x86_avx2_phadd_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phadd_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
@@ -584,6 +615,7 @@ define <16 x i16> @test_x86_avx2_phadd_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phadd_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -600,6 +632,7 @@ define <16 x i16> @test_x86_avx2_phadd_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phadd_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -616,6 +649,7 @@ define <8 x i32> @test_x86_avx2_phsub_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phsub_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
@@ -632,6 +666,7 @@ define <16 x i16> @test_x86_avx2_phsub_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phsub_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -648,6 +683,7 @@ define <16 x i16> @test_x86_avx2_phsub_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_phsub_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -664,6 +700,7 @@ define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i8> [[TMP3]] to <16 x i16>
@@ -682,16 +719,17 @@ define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(ptr %ptr, <32 x i8> %a1) #
; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw_load_op0(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[A0:%.*]] = load <32 x i8>, ptr [[PTR:%.*]], align 32
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[PTR]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP7]], align 32
; CHECK-NEXT: [[TMP8:%.*]] = or <32 x i8> [[_MSLD]], [[TMP2]]
@@ -711,6 +749,7 @@ define <16 x i16> @test_x86_avx2_pmul_hr_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pmul_hr_sw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -727,6 +766,7 @@ define <32 x i8> @test_x86_avx2_pshuf_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pshuf_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]])
@@ -743,6 +783,7 @@ define <32 x i8> @test_x86_avx2_psign_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psign_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]])
@@ -759,6 +800,7 @@ define <8 x i32> @test_x86_avx2_psign_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psign_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
@@ -775,6 +817,7 @@ define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psign_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
@@ -791,17 +834,18 @@ define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_mpsadbw(
; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <32 x i8> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i8> [[TMP2]] to i256
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i256 [[TMP4]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]], i8 7)
; CHECK-NEXT: store <16 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x i16> [[RES]]
@@ -815,16 +859,17 @@ define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(ptr %ptr, <32 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_mpsadbw_load_op0(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[A0:%.*]] = load <32 x i8>, ptr [[PTR:%.*]], align 32
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[PTR]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP7]], align 32
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <32 x i8> [[_MSLD]] to i256
@@ -832,11 +877,11 @@ define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(ptr %ptr, <32 x i8> %a1) #0 {
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <32 x i8> [[TMP2]] to i256
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i256 [[TMP9]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP1]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF1]]
-; CHECK: 10:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
+; CHECK: 11:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 11:
+; CHECK: 12:
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> [[A0]], <32 x i8> [[A1:%.*]], i8 7)
; CHECK-NEXT: store <16 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x i16> [[RES]]
@@ -850,6 +895,7 @@ define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_packusdw(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -868,6 +914,7 @@ declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readno
define <16 x i16> @test_x86_avx2_packusdw_fold() #0 {
; CHECK-LABEL: @test_x86_avx2_packusdw_fold(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> zeroinitializer)
; CHECK-NEXT: [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
@@ -881,18 +928,19 @@ define <16 x i16> @test_x86_avx2_packusdw_fold() #0 {
define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendvb(
-; CHECK-NEXT: [[TMP1:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = ashr <32 x i8> [[A2:%.*]], splat (i8 7)
-; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i8> [[TMP4]] to <32 x i1>
-; CHECK-NEXT: [[TMP6:%.*]] = ashr <32 x i8> [[TMP1]], splat (i8 7)
+; CHECK-NEXT: [[TMP10:%.*]] = ashr <32 x i8> [[A2:%.*]], splat (i8 7)
+; CHECK-NEXT: [[TMP5:%.*]] = trunc <32 x i8> [[TMP10]] to <32 x i1>
+; CHECK-NEXT: [[TMP6:%.*]] = ashr <32 x i8> [[TMP4]], splat (i8 7)
; CHECK-NEXT: [[TMP7:%.*]] = trunc <32 x i8> [[TMP6]] to <32 x i1>
-; CHECK-NEXT: [[TMP8:%.*]] = select <32 x i1> [[TMP5]], <32 x i8> [[TMP2]], <32 x i8> [[TMP3]]
-; CHECK-NEXT: [[TMP9:%.*]] = xor <32 x i8> [[A1:%.*]], [[A0:%.*]]
-; CHECK-NEXT: [[TMP10:%.*]] = or <32 x i8> [[TMP9]], [[TMP2]]
-; CHECK-NEXT: [[TMP11:%.*]] = or <32 x i8> [[TMP10]], [[TMP3]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <32 x i1> [[TMP5]], <32 x i8> [[TMP2]], <32 x i8> [[TMP9]]
+; CHECK-NEXT: [[TMP3:%.*]] = xor <32 x i8> [[A1:%.*]], [[A0:%.*]]
+; CHECK-NEXT: [[_MSPROP:%.*]] = or <32 x i8> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = or <32 x i8> [[_MSPROP]], [[TMP9]]
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <32 x i1> [[TMP7]], <32 x i8> [[TMP11]], <32 x i8> [[TMP8]]
; CHECK-NEXT: [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A0]], <32 x i8> [[A1]], <32 x i8> [[A2]])
; CHECK-NEXT: store <32 x i8> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
@@ -908,6 +956,7 @@ define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 16, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]], <16 x i32> <i32 16, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -924,6 +973,7 @@ define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendd_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> <i32 4, i32 5, i32 6, i32 3>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[A0:%.*]], <4 x i32> [[A1:%.*]], <4 x i32> <i32 4, i32 5, i32 6, i32 3>
@@ -940,6 +990,7 @@ define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_pblendd_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]], <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -956,6 +1007,7 @@ define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_permd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
@@ -972,17 +1024,18 @@ define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_permps(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i256 [[TMP4]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx2.permps(<8 x float> [[A0:%.*]], <8 x i32> [[A1:%.*]])
; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x float> [[RES]]
@@ -997,16 +1050,17 @@ define <2 x i64> @test_x86_avx2_maskload_q(ptr %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_q(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP3]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
-; CHECK: 4:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
+; CHECK: 5:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 5:
+; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.maskload.q(ptr [[A0:%.*]], <2 x i64> [[A1:%.*]])
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x i64> [[RES]]
@@ -1021,16 +1075,17 @@ define <4 x i64> @test_x86_avx2_maskload_q_256(ptr %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_q_256(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i256 [[TMP3]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
-; CHECK: 4:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
+; CHECK: 5:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 5:
+; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr [[A0:%.*]], <4 x i64> [[A1:%.*]])
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i64> [[RES]]
@@ -1045,16 +1100,17 @@ define <4 x i32> @test_x86_avx2_maskload_d(ptr %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_d(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP3]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
-; CHECK: 4:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
+; CHECK: 5:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 5:
+; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.maskload.d(ptr [[A0:%.*]], <4 x i32> [[A1:%.*]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
@@ -1069,16 +1125,17 @@ define <8 x i32> @test_x86_avx2_maskload_d_256(ptr %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_maskload_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i256 [[TMP3]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
-; CHECK: 4:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
+; CHECK: 5:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 5:
+; CHECK: 6:
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.maskload.d.256(ptr [[A0:%.*]], <8 x i32> [[A1:%.*]])
; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
@@ -1094,6 +1151,7 @@ define void @test_x86_avx2_maskstore_q(ptr %a0, <2 x i64> %a1, <2 x i64> %a2) #0
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -1102,11 +1160,11 @@ define void @test_x86_avx2_maskstore_q(ptr %a0, <2 x i64> %a1, <2 x i64> %a2) #0
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i128 [[TMP5]], 0
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
-; CHECK: 6:
+; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 7:
+; CHECK: 8:
; CHECK-NEXT: call void @llvm.x86.avx2.maskstore.q(ptr [[A0:%.*]], <2 x i64> [[A1:%.*]], <2 x i64> [[A2:%.*]])
; CHECK-NEXT: ret void
;
@@ -1121,6 +1179,7 @@ define void @test_x86_avx2_maskstore_q_256(ptr %a0, <4 x i64> %a1, <4 x i64> %a2
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -1129,11 +1188,11 @@ define void @test_x86_avx2_maskstore_q_256(ptr %a0, <4 x i64> %a1, <4 x i64> %a2
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP3]] to i256
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i256 [[TMP5]], 0
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
-; CHECK: 6:
+; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 7:
+; CHECK: 8:
; CHECK-NEXT: call void @llvm.x86.avx2.maskstore.q.256(ptr [[A0:%.*]], <4 x i64> [[A1:%.*]], <4 x i64> [[A2:%.*]])
; CHECK-NEXT: ret void
;
@@ -1148,6 +1207,7 @@ define void @test_x86_avx2_maskstore_d(ptr %a0, <4 x i32> %a1, <4 x i32> %a2) #0
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -1156,11 +1216,11 @@ define void @test_x86_avx2_maskstore_d(ptr %a0, <4 x i32> %a1, <4 x i32> %a2) #0
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i128 [[TMP5]], 0
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
-; CHECK: 6:
+; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 7:
+; CHECK: 8:
; CHECK-NEXT: call void @llvm.x86.avx2.maskstore.d(ptr [[A0:%.*]], <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]])
; CHECK-NEXT: ret void
;
@@ -1175,6 +1235,7 @@ define void @test_x86_avx2_maskstore_d_256(ptr %a0, <8 x i32> %a1, <8 x i32> %a2
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -1183,11 +1244,11 @@ define void @test_x86_avx2_maskstore_d_256(ptr %a0, <8 x i32> %a1, <8 x i32> %a2
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP3]] to i256
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i256 [[TMP5]], 0
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
-; CHECK: 6:
+; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 7:
+; CHECK: 8:
; CHECK-NEXT: call void @llvm.x86.avx2.maskstore.d.256(ptr [[A0:%.*]], <8 x i32> [[A1:%.*]], <8 x i32> [[A2:%.*]])
; CHECK-NEXT: ret void
;
@@ -1201,6 +1262,7 @@ define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -1216,6 +1278,7 @@ define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
define <4 x i32> @test_x86_avx2_psllv_d_const() #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_d_const(
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 0, i32 33, i32 -1>)
; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i32> [[TMP1]], zeroinitializer
@@ -1240,6 +1303,7 @@ define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -1255,6 +1319,7 @@ define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
define <8 x i32> @test_x86_avx2_psllv_d_256_const() #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_d_256_const(
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 0, i32 33, i32 -1, i32 2, i32 0, i32 34, i32 -2>)
; CHECK-NEXT: [[TMP2:%.*]] = or <8 x i32> [[TMP1]], zeroinitializer
@@ -1279,6 +1344,7 @@ define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
@@ -1293,6 +1359,7 @@ define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
}
define <2 x i64> @test_x86_avx2_psllv_q_const() #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_q_const(
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> zeroinitializer, <2 x i64> <i64 1, i64 -1>)
; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
@@ -1310,6 +1377,7 @@ define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_q_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i64>
@@ -1325,6 +1393,7 @@ define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
define <4 x i64> @test_x86_avx2_psllv_q_256_const() #0 {
; CHECK-LABEL: @test_x86_avx2_psllv_q_256_const(
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> zeroinitializer, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer
@@ -1342,6 +1411,7 @@ define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -1357,6 +1427,7 @@ define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
define <4 x i32> @test_x86_avx2_psrlv_d_const() #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_d_const(
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 0, i32 33, i32 -1>)
; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i32> [[TMP1]], zeroinitializer
@@ -1381,6 +1452,7 @@ define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -1396,6 +1468,7 @@ define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
define <8 x i32> @test_x86_avx2_psrlv_d_256_const() #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_d_256_const(
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 0, i32 33, i32 -1, i32 2, i32 0, i32 34, i32 -2>)
; CHECK-NEXT: [[TMP2:%.*]] = or <8 x i32> [[TMP1]], zeroinitializer
@@ -1420,6 +1493,7 @@ define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
@@ -1435,6 +1509,7 @@ define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
define <2 x i64> @test_x86_avx2_psrlv_q_const() #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_q_const(
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> zeroinitializer, <2 x i64> <i64 1, i64 -1>)
; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
@@ -1452,6 +1527,7 @@ define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_q_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i64>
@@ -1468,6 +1544,7 @@ define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
define <4 x i64> @test_x86_avx2_psrlv_q_256_const() #0 {
; CHECK-LABEL: @test_x86_avx2_psrlv_q_256_const(
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> zeroinitializer, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer
@@ -1485,6 +1562,7 @@ define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrav_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -1500,6 +1578,7 @@ define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
define <4 x i32> @test_x86_avx2_psrav_d_const() #0 {
; CHECK-LABEL: @test_x86_avx2_psrav_d_const(
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 18, i32 35, i32 52>)
; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i32> [[TMP1]], zeroinitializer
@@ -1516,6 +1595,7 @@ define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_avx2_psrav_d_256(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -1531,6 +1611,7 @@ define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
define <8 x i32> @test_x86_avx2_psrav_d_256_const() #0 {
; CHECK-LABEL: @test_x86_avx2_psrav_d_256_const(
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>)
; CHECK-NEXT: [[TMP2:%.*]] = or <8 x i32> [[TMP1]], zeroinitializer
@@ -1549,6 +1630,7 @@ define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, ptr %a1, <4 x i
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1560,11 +1642,11 @@ define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, ptr %a1, <4 x i
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <2 x i64> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <2 x double> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x double> [[RES]]
@@ -1582,6 +1664,7 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, ptr %a1, <4
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1593,11 +1676,11 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, ptr %a1, <4
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i64> [[TMP4]] to i256
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i256 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x double> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x double> [[RES]]
@@ -1615,6 +1698,7 @@ define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, ptr %a1, <2 x i
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1626,11 +1710,11 @@ define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, ptr %a1, <2 x i
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <2 x i64> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> [[A0:%.*]], ptr [[A1:%.*]], <2 x i64> [[IDX:%.*]], <2 x double> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x double> [[RES]]
@@ -1648,6 +1732,7 @@ define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, ptr %a1, <4
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1659,11 +1744,11 @@ define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, ptr %a1, <4
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i64> [[TMP4]] to i256
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i256 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> [[A0:%.*]], ptr [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x double> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x double> [[RES]]
@@ -1681,6 +1766,7 @@ define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, ptr %a1, <4 x i32
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1692,11 +1778,11 @@ define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, ptr %a1, <4 x i32
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x float> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -1714,6 +1800,7 @@ define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, ptr %a1, <8 x
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1725,11 +1812,11 @@ define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, ptr %a1, <8 x
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <8 x i32> [[TMP4]] to i256
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i256 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> [[A0:%.*]], ptr [[A1:%.*]], <8 x i32> [[IDX:%.*]], <8 x float> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x float> [[RES]]
@@ -1747,6 +1834,7 @@ define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, ptr %a1, <2 x i64
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1758,11 +1846,11 @@ define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, ptr %a1, <2 x i64
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> [[A0:%.*]], ptr [[A1:%.*]], <2 x i64> [[IDX:%.*]], <4 x float> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -1780,6 +1868,7 @@ define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, ptr %a1, <4 x
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1791,11 +1880,11 @@ define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, ptr %a1, <4 x
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> [[A0:%.*]], ptr [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x float> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -1813,6 +1902,7 @@ define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, ptr %a1, <4 x i32> %id
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1824,11 +1914,11 @@ define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, ptr %a1, <4 x i32> %id
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <2 x i64> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <2 x i64> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x i64> [[RES]]
@@ -1846,6 +1936,7 @@ define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, ptr %a1, <4 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1857,11 +1948,11 @@ define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, ptr %a1, <4 x i32>
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i64> [[TMP4]] to i256
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i256 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x i64> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i64> [[RES]]
@@ -1879,6 +1970,7 @@ define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, ptr %a1, <2 x i64> %id
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1890,11 +1982,11 @@ define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, ptr %a1, <2 x i64> %id
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <2 x i64> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> [[A0:%.*]], ptr [[A1:%.*]], <2 x i64> [[IDX:%.*]], <2 x i64> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x i64> [[RES]]
@@ -1912,6 +2004,7 @@ define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, ptr %a1, <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1923,11 +2016,11 @@ define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, ptr %a1, <4 x i64>
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i64> [[TMP4]] to i256
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i256 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> [[A0:%.*]], ptr [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x i64> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i64> [[RES]]
@@ -1945,6 +2038,7 @@ define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, ptr %a1, <4 x i32> %id
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1956,11 +2050,11 @@ define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, ptr %a1, <4 x i32> %id
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x i32> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
@@ -1978,6 +2072,7 @@ define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, ptr %a1, <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1989,11 +2084,11 @@ define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, ptr %a1, <8 x i32>
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <8 x i32> [[TMP4]] to i256
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i256 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> [[A0:%.*]], ptr [[A1:%.*]], <8 x i32> [[IDX:%.*]], <8 x i32> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
@@ -2011,6 +2106,7 @@ define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, ptr %a1, <2 x i64> %id
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -2022,11 +2118,11 @@ define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, ptr %a1, <2 x i64> %id
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], <2 x i64> [[IDX:%.*]], <4 x i32> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
@@ -2044,6 +2140,7 @@ define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, ptr %a1, <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -2055,11 +2152,11 @@ define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, ptr %a1, <4 x i64>
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x i32> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x i32> [[MASK:%.*]], i8 2)
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
@@ -2078,6 +2175,7 @@ define <8 x float> @test_gather_mask(<8 x float> %a0, ptr %a, <8 x i32> %idx, <
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP6]], 0
@@ -2089,20 +2187,20 @@ define <8 x float> @test_gather_mask(<8 x float> %a0, ptr %a, <8 x i32> %idx, <
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i32> [[TMP4]] to i256
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i256 [[TMP8]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
-; CHECK: 9:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF1]]
+; CHECK: 10:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 10:
+; CHECK: 11:
; CHECK-NEXT: [[RES:%.*]] = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> [[A0:%.*]], ptr [[A:%.*]], <8 x i32> [[IDX:%.*]], <8 x float> [[MASK:%.*]], i8 4)
; CHECK-NEXT: [[_MSCMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT: br i1 [[_MSCMP6]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
-; CHECK: 11:
+; CHECK-NEXT: br i1 [[_MSCMP6]], label [[TMP12:%.*]], label [[TMP16:%.*]], !prof [[PROF1]]
+; CHECK: 12:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 12:
+; CHECK: 13:
; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[OUT:%.*]] to i64
-; CHECK-NEXT: [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
; CHECK-NEXT: store <8 x i32> [[TMP4]], ptr [[TMP15]], align 4
; CHECK-NEXT: store <8 x float> [[MASK]], ptr [[OUT]], align 4
@@ -2123,6 +2221,7 @@ define <2 x i64> @test_mask_demanded_bits(<2 x i64> %a0, ptr %a1, <2 x i64> %idx
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT: [[MASK1:%.*]] = sext <2 x i1> [[MASK:%.*]] to <2 x i64>
@@ -2136,11 +2235,11 @@ define <2 x i64> @test_mask_demanded_bits(<2 x i64> %a0, ptr %a1, <2 x i64> %idx
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <2 x i64> [[_MSPROP]] to i128
; CHECK-NEXT: [[_MSCMP4:%.*]] = icmp ne i128 [[TMP7]], 0
; CHECK-NEXT: [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> [[A0:%.*]], ptr [[A1:%.*]], <2 x i64> [[IDX:%.*]], <2 x i64> [[MASK1]], i8 2)
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x i64> [[RES]]
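
(Editorial note on the avx2 churn above: every test picks up the same two
mechanical changes. First, each instrumented function's prologue now loads
@__msan_va_arg_overflow_size_tls, apparently because the new i386 va-arg
helper reads the overflow size at entry; the extra unnamed value shifts the
auto-numbered block labels by one, hence the 8:/9: -> 9:/10: renames.
Second, the shadow address is no longer formed with the x86_64 Linux
XorMask 0x500000000000 (87960930222080) but with the i386 AndMask:
"and i64 %app, -2147483649" keeps the low 64 bits with bit 31 cleared,
i.e. shadow = app & ~0x80000000. A minimal sketch of the new mapping,
with the constants taken verbatim from the diff and %p / %sv standing in
as hypothetical app pointer and shadow value:

  ; hypothetical i386 MSan shadow store, constants as in the diff above
  %app  = ptrtoint ptr %p to i64
  %shad = and i64 %app, -2147483649    ; shadow = app & ~0x80000000
  %sptr = inttoptr i64 %shad to ptr
  store <8 x i32> %sv, ptr %sptr, align 4

This is only a reading of the generated checks, not a statement of the
final i386 memory map constants.)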
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll
index f89f489b111f9e..74ce562be65e1d 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll
@@ -2,7 +2,7 @@
; RUN: opt %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+target triple = "i386-unknown-linux-gnu"
declare <1 x i64> @llvm.x86.ssse3.phadd.w(<1 x i64>, <1 x i64>) nounwind readnone
@@ -12,6 +12,7 @@ define i64 @test1(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -52,6 +53,7 @@ define i64 @test88(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -92,6 +94,7 @@ define i64 @test87(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -132,6 +135,7 @@ define i64 @test86(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -172,6 +176,7 @@ define i64 @test85(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -212,6 +217,7 @@ define i64 @test84(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -252,6 +258,7 @@ define i64 @test83(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -292,6 +299,7 @@ define i64 @test82(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -332,6 +340,7 @@ define i64 @test81(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -372,6 +381,7 @@ define i64 @test80(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -412,6 +422,7 @@ define i64 @test79(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -452,6 +463,7 @@ define i64 @test78(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -492,6 +504,7 @@ define i64 @test77(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -532,6 +545,7 @@ define i64 @test76(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP19:%.*]] = bitcast <1 x i64> [[TMP16]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -580,6 +594,7 @@ define i64 @test75(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP19:%.*]] = bitcast <1 x i64> [[TMP16]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -628,6 +643,7 @@ define i64 @test74(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP16:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP17:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP19:%.*]] = bitcast <1 x i64> [[TMP16]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -675,6 +691,7 @@ define i64 @test73(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -709,6 +726,7 @@ define i64 @test72(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -741,6 +759,7 @@ define i64 @test72_2(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -775,6 +794,7 @@ define i64 @test71(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP5]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -803,6 +823,7 @@ define i64 @test70(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -835,6 +856,7 @@ define i64 @test70_2(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -869,6 +891,7 @@ define i64 @test69(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -903,6 +926,7 @@ define i64 @test68(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP5]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -931,6 +955,7 @@ define i64 @test67(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -965,6 +990,7 @@ define i64 @test66(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -997,6 +1023,7 @@ define i64 @test66_2(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -1032,6 +1059,7 @@ define i64 @test65(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -1077,6 +1105,7 @@ define i64 @test64(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -1122,6 +1151,7 @@ define i64 @test63(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP7]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -1161,6 +1191,7 @@ define i64 @test62(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -1206,6 +1237,7 @@ define i64 @test61(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -1251,6 +1283,7 @@ define i64 @test60(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP7]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -1290,6 +1323,7 @@ define i64 @test59(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -1335,6 +1369,7 @@ define i64 @test58(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -1380,6 +1415,7 @@ define i64 @test56(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -1420,6 +1456,7 @@ define i64 @test55(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -1460,6 +1497,7 @@ define i64 @test54(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -1500,6 +1538,7 @@ define i64 @test53(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -1540,6 +1579,7 @@ define i64 @test52(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -1578,6 +1618,7 @@ define i64 @test51(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -1618,6 +1659,7 @@ define i64 @test50(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -1658,6 +1700,7 @@ define i64 @test49(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP13:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP15:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP16:%.*]] = bitcast <1 x i64> [[TMP13]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -1702,6 +1745,7 @@ define i64 @test48(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -1742,6 +1786,7 @@ define i64 @test47(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -1782,6 +1827,7 @@ define i64 @test46(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -1822,6 +1868,7 @@ define i64 @test45(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -1860,6 +1907,7 @@ define i64 @test44(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP4]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -1896,6 +1944,7 @@ define i64 @test43(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -1936,6 +1985,7 @@ define i64 @test42(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -1976,6 +2026,7 @@ define i64 @test41(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -2016,6 +2067,7 @@ define i64 @test40(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -2056,6 +2108,7 @@ define i64 @test39(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -2096,6 +2149,7 @@ define i64 @test38(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -2136,6 +2190,7 @@ define i64 @test37(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -2176,6 +2231,7 @@ define i64 @test36(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP4]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -2210,6 +2266,7 @@ define i64 @test35(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -2250,6 +2307,7 @@ define i64 @test34(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -2290,6 +2348,7 @@ define i64 @test33(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -2330,6 +2389,7 @@ define i64 @test32(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = bitcast <1 x i64> [[TMP5]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -2369,6 +2429,7 @@ define i64 @test31(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -2409,6 +2470,7 @@ define i64 @test30(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -2449,6 +2511,7 @@ define i64 @test29(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -2489,6 +2552,7 @@ define i64 @test28(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -2529,6 +2593,7 @@ define i64 @test27(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -2569,6 +2634,7 @@ define i64 @test26(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -2609,21 +2675,22 @@ define void @test25(ptr %p, <1 x i64> %a) nounwind optsize ssp #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64 [[_MSPROP]] to <1 x i64>
; CHECK-NEXT: [[MMX_VAR_I:%.*]] = bitcast i64 [[TMP0]] to <1 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: store <1 x i64> [[TMP3]], ptr [[TMP6]], align 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF1:![0-9]+]]
-; CHECK: 7:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: 8:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6:[0-9]+]]
; CHECK-NEXT: unreachable
-; CHECK: 8:
+; CHECK: 9:
; CHECK-NEXT: tail call void @llvm.x86.mmx.movnt.dq(ptr [[P]], <1 x i64> [[MMX_VAR_I]]) #[[ATTR2]]
; CHECK-NEXT: ret void
;
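The application-to-shadow mapping in the hunk above is the visible core of the i386 port: the x86_64 XOR with 87960930222080 (0x500000000000) becomes an AND with -2147483649 (0xFFFFFFFF7FFFFFFF as i64, i.e. clearing bit 31), and the ORIGINS hunks further down replace the 17592186044416 (0x100000000000) origin offset with 1073741824 (0x40000000) plus 4-byte alignment. A minimal C++ sketch of the mapping these constants imply, with hypothetical function names rather than anything from MemorySanitizer.cpp:

  // Illustrative model of the i386 mapping in the updated CHECK lines;
  // names are hypothetical, not the pass's API.
  #include <cstdint>

  uint64_t shadowFor(uint64_t Addr) {
    // "and i64 %p, -2147483649" == Addr & 0xFFFFFFFF7FFFFFFF (clear bit 31)
    return Addr & 0xFFFFFFFF7FFFFFFFULL;
  }

  uint64_t originFor(uint64_t Addr) {
    // ORIGINS runs: "add i64 %shadow, 1073741824", then "and i64 %o, -4"
    return (shadowFor(Addr) + 0x40000000ULL) & ~uint64_t(3);
  }

On a 32-bit address this places shadow in the low 2 GiB, with origins 1 GiB above the shadow base.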
@@ -2641,6 +2708,7 @@ define i32 @test24(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <1 x i64> [[TMP2]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
@@ -2648,11 +2716,11 @@ define i32 @test24(<1 x i64> %a) #0 {
; CHECK-NEXT: [[MMX_VAR_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP4]] to i64
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.x86.mmx.pmovmskb(<1 x i64> [[MMX_VAR_I]]) #[[ATTR2]]
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[TMP1]]
@@ -2673,6 +2741,7 @@ define void @test23(<1 x i64> %d, <1 x i64> %n, ptr %p) nounwind optsize ssp #0
; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP4]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[N]] to <8 x i8>
@@ -2689,11 +2758,11 @@ define void @test23(<1 x i64> %d, <1 x i64> %n, ptr %p) nounwind optsize ssp #0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
-; CHECK: 11:
+; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
+; CHECK: 12:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 12:
+; CHECK: 13:
; CHECK-NEXT: tail call void @llvm.x86.mmx.maskmovq(<1 x i64> [[MMX_VAR_I]], <1 x i64> [[MMX_VAR1_I]], ptr [[P]]) #[[ATTR2]]
; CHECK-NEXT: ret void
;
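test23 above also shows the unchanged shape of MSan's strict argument checks, just with shifted numbering: each operand's shadow is collapsed and compared against zero (_MSCMP/_MSCMP1), the comparisons are OR-ed into _MSOR, and the !prof-weighted branch lands in __msan_warning_noreturn(). As a model in C++, not the pass's actual code:

  // Conceptual stand-in for the "_MSCMP -> _MSOR -> br" pattern; shadow_a
  // and shadow_b represent the collapsed operand shadows.
  #include <cstdint>

  extern "C" void __msan_warning_noreturn();  // provided by the MSan runtime

  void checkFullyInitialized(uint64_t shadow_a, uint64_t shadow_b) {
    bool msor = (shadow_a != 0) | (shadow_b != 0);  // "_MSOR = or i1 ..."
    if (msor) {                                     // the !prof-weighted branch
      __msan_warning_noreturn();
      __builtin_unreachable();                      // matches "unreachable"
    }
  }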
@@ -2714,6 +2783,7 @@ define i64 @test22(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <1 x i64> [[TMP8]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -2753,6 +2823,7 @@ define i64 @test21(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -2760,11 +2831,11 @@ define i64 @test21(<1 x i64> %a) #0 {
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <4 x i16> [[TMP0]] to <1 x i64>
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <1 x i64> [[TMP10]] to i64
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
-; CHECK: 6:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP14:%.*]], !prof [[PROF1]]
+; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 7:
+; CHECK: 8:
; CHECK-NEXT: [[TMP13:%.*]] = tail call <1 x i64> @llvm.x86.sse.pshuf.w(<1 x i64> [[TMP11]], i8 3) #[[ATTR5]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP13]] to <4 x i16>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <1 x i64>
@@ -2787,6 +2858,7 @@ define i32 @test21_2(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -2794,11 +2866,11 @@ define i32 @test21_2(<1 x i64> %a) #0 {
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <4 x i16> [[TMP0]] to <1 x i64>
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <1 x i64> [[TMP10]] to i64
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
-; CHECK: 6:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP14:%.*]], !prof [[PROF1]]
+; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 7:
+; CHECK: 8:
; CHECK-NEXT: [[TMP13:%.*]] = tail call <1 x i64> @llvm.x86.sse.pshuf.w(<1 x i64> [[TMP11]], i8 3) #[[ATTR5]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP13]] to <4 x i16>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i16> [[TMP3]] to <2 x i32>
@@ -2824,6 +2896,7 @@ define i64 @test20(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <1 x i64> [[TMP5]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -2857,6 +2930,7 @@ define <2 x double> @test19(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <1 x i64> [[TMP4]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -2864,11 +2938,11 @@ define <2 x double> @test19(<1 x i64> %a) #0 {
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <2 x i32> [[TMP0]] to <1 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP5]] to i64
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 6:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 7:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 7:
+; CHECK: 8:
; CHECK-NEXT: [[TMP2:%.*]] = tail call <2 x double> @llvm.x86.sse.cvtpi2pd(<1 x i64> [[TMP8]]) #[[ATTR5]]
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x double> [[TMP2]]
@@ -2887,14 +2961,15 @@ define i64 @test18(<2 x double> %a) #0 {
; CHECK-SAME: <2 x double> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
-; CHECK: 2:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
+; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 3:
+; CHECK: 4:
; CHECK-NEXT: [[TMP8:%.*]] = tail call <1 x i64> @llvm.x86.sse.cvttpd2pi(<2 x double> [[A]]) #[[ATTR5]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP1]] to <1 x i64>
@@ -2917,14 +2992,15 @@ define i64 @test17(<2 x double> %a) #0 {
; CHECK-SAME: <2 x double> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
-; CHECK: 2:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
+; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 3:
+; CHECK: 4:
; CHECK-NEXT: [[TMP8:%.*]] = tail call <1 x i64> @llvm.x86.sse.cvtpd2pi(<2 x double> [[A]]) #[[ATTR5]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <1 x i64> [[TMP8]] to <2 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP1]] to <1 x i64>
@@ -2948,6 +3024,7 @@ define i64 @test16(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP6:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = extractelement <1 x i64> [[TMP6]], i32 0
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <1 x i64> [[A]], i32 0
@@ -2962,11 +3039,11 @@ define i64 @test16(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP5]] to i64
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i64 [[TMP12]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP9:%.*]], label [[TMP13:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[TMP10:%.*]] = tail call <1 x i64> @llvm.x86.mmx.palignr.b(<1 x i64> [[MMX_VAR]], <1 x i64> [[MMX_VAR1]], i8 16)
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <1 x i64> [[TMP10]] to i64
; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
@@ -2989,6 +3066,7 @@ define i64 @test15(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <2 x i32>
@@ -3021,6 +3099,7 @@ define i64 @test14(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <4 x i16>
@@ -3053,6 +3132,7 @@ define i64 @test13(<1 x i64> %a) #0 {
; CHECK-SAME: <1 x i64> [[A:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP7:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <1 x i64> [[TMP7]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
@@ -3086,6 +3166,7 @@ define i64 @test12(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -3126,6 +3207,7 @@ define i64 @test11(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -3166,6 +3248,7 @@ define i64 @test10(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -3206,6 +3289,7 @@ define i64 @test9(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -3246,6 +3330,7 @@ define i64 @test8(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -3286,6 +3371,7 @@ define i64 @test7(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP15:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP17:%.*]] = bitcast <1 x i64> [[TMP9]] to <8 x i8>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
@@ -3330,6 +3416,7 @@ define i64 @test6(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -3370,6 +3457,7 @@ define i64 @test5(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -3410,6 +3498,7 @@ define i64 @test4(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -3450,6 +3539,7 @@ define i64 @test3(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <4 x i16>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <4 x i16>
@@ -3490,6 +3580,7 @@ define i64 @test2(<1 x i64> %a, <1 x i64> %b) #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP9:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP12:%.*]] = bitcast <1 x i64> [[TMP9]] to <2 x i32>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <2 x i32>
@@ -3531,17 +3622,18 @@ define <4 x float> @test89(<4 x float> %a, <1 x i64> %b) nounwind #0 {
; CHECK-SAME: <4 x float> [[A:%.*]], <1 x i64> [[B:%.*]]) #[[ATTR4:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <1 x i64> [[TMP4]] to i64
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[C:%.*]] = tail call <4 x float> @llvm.x86.sse.cvtpi2ps(<4 x float> [[A]], <1 x i64> [[B]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[C]]
@@ -3559,6 +3651,7 @@ define void @test90() #0 {
; ALL-NEXT: ret{{[l|q]}}
; CHECK-LABEL: define void @test90(
; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: call void @llvm.x86.mmx.emms()
; CHECK-NEXT: ret void
@@ -3575,16 +3668,17 @@ define <1 x i64> @test_mm_insert_pi16(<1 x i64> %a.coerce, i32 %d) nounwind #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP3:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <1 x i64> [[TMP3]] to i64
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i32 [[TMP6]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[TMP9:%.*]] = tail call <1 x i64> @llvm.x86.mmx.pinsr.w(<1 x i64> [[A_COERCE]], i32 [[D]], i32 2)
; CHECK-NEXT: store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <1 x i64> [[TMP9]]
@@ -3601,14 +3695,15 @@ define i32 @test_mm_extract_pi16(<1 x i64> %a.coerce) nounwind #0 {
; CHECK-SAME: <1 x i64> [[A_COERCE:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP2:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <1 x i64> [[TMP2]] to i64
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP3:%.*]], !prof [[PROF1]]
-; CHECK: 2:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
+; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR6]]
; CHECK-NEXT: unreachable
-; CHECK: 3:
+; CHECK: 4:
; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.x86.mmx.pextr.w(<1 x i64> [[A_COERCE]], i32 2)
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[TMP1]]
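Every function in this file now begins with a load from @__msan_va_arg_overflow_size_tls, and that single extra instruction is what shifts the temporaries and block labels by one throughout. It appears even in @test90, which takes no arguments at all, because the helper reads the caller-recorded overflow size unconditionally at entry. A conceptual stand-in for that contract, with hedged names and a buffer size inferred from the [100 x i64] fields in the kernel context struct below:

  // Conceptual model only; these are stand-ins, not the runtime's real
  // declarations. The caller records how many bytes of va_arg shadow
  // spilled past the fixed TLS buffer; the callee reads it once at entry.
  #include <cstdint>

  constexpr uint64_t kVAArgTLSBytes = 100 * sizeof(uint64_t);  // per [100 x i64]
  uint64_t va_arg_overflow_size_tls = 0;  // @__msan_va_arg_overflow_size_tls

  uint64_t vaShadowCopySize(uint64_t fixed_bytes) {
    uint64_t total = fixed_bytes + va_arg_overflow_size_tls;
    return total < kVAArgTLSBytes ? total : kVAArgTLSBytes;  // clamp (assumption)
  }

The clamp is an assumption of this sketch; the point is only that the fixed area plus the TLS value bounds how much saved shadow a later va_start copy needs.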
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/msan_x86_bts_asm.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386_bts_asm.ll
similarity index 93%
rename from llvm/test/Instrumentation/MemorySanitizer/i386/msan_x86_bts_asm.ll
rename to llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386_bts_asm.ll
index e9738d43e8bd52..a37c4f64b4de2c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/msan_x86_bts_asm.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386_bts_asm.ll
@@ -4,7 +4,7 @@
; RUN: opt < %s -msan-kernel=1 -msan-check-access-address=0 -msan-handle-asm-conservative=1 -S -passes=msan 2>&1 | FileCheck %s -check-prefixes=CONS
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+target triple = "i386-unknown-linux-gnu"
define dso_local i32 @main() sanitize_memory {
; CHECK-LABEL: define dso_local i32 @main(
@@ -18,6 +18,7 @@ define dso_local i32 @main() sanitize_memory {
; CHECK-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CHECK-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CHECK-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[VA_ARG_OVERFLOW_SIZE]], align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @__msan_poison_alloca(ptr [[RETVAL]], i64 4, ptr @[[GLOB0:[0-9]+]])
@@ -56,11 +57,11 @@ define dso_local i32 @main() sanitize_memory {
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP15]], align 8
; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 8
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSLD]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB18:.*]], label %[[BB19:.*]], !prof [[PROF1:![0-9]+]]
-; CHECK: [[BB18]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP17]]) #[[ATTR2:[0-9]+]]
-; CHECK-NEXT: br label %[[BB19]]
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB19:.*]], label %[[BB20:.*]], !prof [[PROF1:![0-9]+]]
; CHECK: [[BB19]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP17]]) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT: br label %[[BB20]]
+; CHECK: [[BB20]]:
; CHECK-NEXT: call void asm "btsq $2, $1
; CHECK-NEXT: [[TMP20:%.*]] = load i8, ptr [[BIT]], align 1
; CHECK-NEXT: [[TMP21:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_1(ptr [[BIT]])
@@ -70,11 +71,11 @@ define dso_local i32 @main() sanitize_memory {
; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
; CHECK-NEXT: [[_MSPROP:%.*]] = trunc i8 [[_MSLD1]] to i1
; CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP20]] to i1
-; CHECK-NEXT: br i1 [[_MSPROP]], label %[[BB25:.*]], label %[[BB26:.*]], !prof [[PROF1]]
-; CHECK: [[BB25]]:
-; CHECK-NEXT: call void @__msan_warning(i32 [[TMP24]]) #[[ATTR2]]
-; CHECK-NEXT: br label %[[BB26]]
+; CHECK-NEXT: br i1 [[_MSPROP]], label %[[BB26:.*]], label %[[BB27:.*]], !prof [[PROF1]]
; CHECK: [[BB26]]:
+; CHECK-NEXT: call void @__msan_warning(i32 [[TMP24]]) #[[ATTR2]]
+; CHECK-NEXT: br label %[[BB27]]
+; CHECK: [[BB27]]:
; CHECK-NEXT: br i1 [[TOBOOL]], label %[[IF_THEN:.*]], label %[[IF_ELSE:.*]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: store i32 0, ptr [[RETVAL_SHADOW]], align 8
@@ -94,6 +95,7 @@ define dso_local i32 @main() sanitize_memory {
; CONS-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; CONS-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; CONS-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; CONS-NEXT: [[TMP18:%.*]] = load i64, ptr [[VA_ARG_OVERFLOW_SIZE]], align 8
; CONS-NEXT: call void @llvm.donothing()
; CONS-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
; CONS-NEXT: call void @__msan_poison_alloca(ptr [[RETVAL]], i64 4, ptr @[[GLOB0:[0-9]+]])
@@ -134,11 +136,11 @@ define dso_local i32 @main() sanitize_memory {
; CONS-NEXT: call void @__msan_instrument_asm_store(ptr [[BIT]], i64 1)
; CONS-NEXT: call void @__msan_instrument_asm_store(ptr [[ADDR]], i64 8)
; CONS-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[_MSLD]], 0
-; CONS-NEXT: br i1 [[_MSCMP]], label %[[BB18:.*]], label %[[BB19:.*]], !prof [[PROF1:![0-9]+]]
-; CONS: [[BB18]]:
-; CONS-NEXT: call void @__msan_warning(i32 [[TMP17]]) #[[ATTR2:[0-9]+]]
-; CONS-NEXT: br label %[[BB19]]
+; CONS-NEXT: br i1 [[_MSCMP]], label %[[BB19:.*]], label %[[BB20:.*]], !prof [[PROF1:![0-9]+]]
; CONS: [[BB19]]:
+; CONS-NEXT: call void @__msan_warning(i32 [[TMP17]]) #[[ATTR2:[0-9]+]]
+; CONS-NEXT: br label %[[BB20]]
+; CONS: [[BB20]]:
; CONS-NEXT: call void asm "btsq $2, $1
; CONS-NEXT: [[TMP20:%.*]] = load i8, ptr [[BIT]], align 1
; CONS-NEXT: [[TMP21:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_1(ptr [[BIT]])
@@ -148,11 +150,11 @@ define dso_local i32 @main() sanitize_memory {
; CONS-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
; CONS-NEXT: [[_MSPROP:%.*]] = trunc i8 [[_MSLD1]] to i1
; CONS-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP20]] to i1
-; CONS-NEXT: br i1 [[_MSPROP]], label %[[BB25:.*]], label %[[BB26:.*]], !prof [[PROF1]]
-; CONS: [[BB25]]:
-; CONS-NEXT: call void @__msan_warning(i32 [[TMP24]]) #[[ATTR2]]
-; CONS-NEXT: br label %[[BB26]]
+; CONS-NEXT: br i1 [[_MSPROP]], label %[[BB26:.*]], label %[[BB27:.*]], !prof [[PROF1]]
; CONS: [[BB26]]:
+; CONS-NEXT: call void @__msan_warning(i32 [[TMP24]]) #[[ATTR2]]
+; CONS-NEXT: br label %[[BB27]]
+; CONS: [[BB27]]:
; CONS-NEXT: br i1 [[TOBOOL]], label %[[IF_THEN:.*]], label %[[IF_ELSE:.*]]
; CONS: [[IF_THEN]]:
; CONS-NEXT: store i32 0, ptr [[RETVAL_SHADOW]], align 8
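In these kernel (-msan-kernel=1) runs the same value comes from the per-task context rather than TLS: the new `[[TMP18]] = load i64, ptr [[VA_ARG_OVERFLOW_SIZE]]` reads field 4 of the struct that the surrounding getelementptrs index. A hypothetical C++ view of that layout, with field names inferred from the CHECK variable names rather than taken from any header:

  // Field order follows the getelementptr type
  // { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64,
  //   [200 x i32], i32, i32 }; names are illustrative.
  #include <cstdint>

  struct MsanContextState {
    uint64_t param_tls[100];          // 0
    uint64_t retval_tls[100];         // 1: RETVAL_SHADOW
    uint64_t va_arg_tls[100];         // 2
    uint64_t va_arg_origin_tls[100];  // 3
    uint64_t va_arg_overflow_size;    // 4: VA_ARG_OVERFLOW_SIZE (new load)
    uint32_t param_origin_tls[200];   // 5: PARAM_ORIGIN
    uint32_t retval_origin_tls;       // 6: RETVAL_ORIGIN
    uint32_t pad;                     // 7: trailing i32 in the GEP type
  };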
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/msan_x86intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll
similarity index 85%
rename from llvm/test/Instrumentation/MemorySanitizer/i386/msan_x86intrinsics.ll
rename to llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll
index fa09ca30027ef3..017bbcf4f3689d 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/msan_x86intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/msan_i386intrinsics.ll
@@ -5,17 +5,18 @@
; REQUIRES: x86-registered-target
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+target triple = "i386-unknown-linux-gnu"
; Store intrinsic.
define void @StoreIntrinsic(ptr %p, <4 x float> %x) nounwind uwtable sanitize_memory {
; CHECK-LABEL: define void @StoreIntrinsic(
; CHECK-SAME: ptr [[P:%.*]], <4 x float> [[X:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -2147483649
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr [[TMP4]], align 1
; CHECK-NEXT: store <4 x float> [[X]], ptr [[P]], align 1
@@ -23,20 +24,21 @@ define void @StoreIntrinsic(ptr %p, <4 x float> %x) nounwind uwtable sanitize_me
;
; ORIGINS-LABEL: define void @StoreIntrinsic(
; ORIGINS-SAME: ptr [[P:%.*]], <4 x float> [[X:%.*]]) #[[ATTR0:[0-9]+]] {
+; ORIGINS-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGINS-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
-; ORIGINS-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
+; ORIGINS-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649
; ORIGINS-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; ORIGINS-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
+; ORIGINS-NEXT: [[TMP6:%.*]] = add i64 [[TMP4]], 1073741824
; ORIGINS-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -4
; ORIGINS-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; ORIGINS-NEXT: store <4 x i32> [[TMP1]], ptr [[TMP5]], align 1
; ORIGINS-NEXT: [[TMP9:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; ORIGINS-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP9]], 0
-; ORIGINS-NEXT: br i1 [[_MSCMP]], label %[[BB10:.*]], label %[[BB14:.*]], !prof [[PROF1:![0-9]+]]
-; ORIGINS: [[BB10]]:
+; ORIGINS-NEXT: br i1 [[_MSCMP]], label %[[BB11:.*]], label %[[BB15:.*]], !prof [[PROF1:![0-9]+]]
+; ORIGINS: [[BB11]]:
; ORIGINS-NEXT: store i32 [[TMP2]], ptr [[TMP8]], align 4
; ORIGINS-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP8]], i32 1
; ORIGINS-NEXT: store i32 [[TMP2]], ptr [[TMP11]], align 4
@@ -44,8 +46,8 @@ define void @StoreIntrinsic(ptr %p, <4 x float> %x) nounwind uwtable sanitize_me
; ORIGINS-NEXT: store i32 [[TMP2]], ptr [[TMP12]], align 4
; ORIGINS-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP8]], i32 3
; ORIGINS-NEXT: store i32 [[TMP2]], ptr [[TMP13]], align 4
-; ORIGINS-NEXT: br label %[[BB14]]
-; ORIGINS: [[BB14]]:
+; ORIGINS-NEXT: br label %[[BB15]]
+; ORIGINS: [[BB15]]:
; ORIGINS-NEXT: store <4 x float> [[X]], ptr [[P]], align 1
; ORIGINS-NEXT: ret void
;
@@ -62,9 +64,10 @@ declare void @llvm.x86.sse.storeu.ps(ptr, <4 x float>) nounwind
define <16 x i8> @LoadIntrinsic(ptr %p) nounwind uwtable sanitize_memory {
; CHECK-LABEL: define <16 x i8> @LoadIntrinsic(
; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -2147483649
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEXT: [[CALL:%.*]] = call <16 x i8> @llvm.x86.sse3.ldu.dq(ptr [[P]])
@@ -73,11 +76,12 @@ define <16 x i8> @LoadIntrinsic(ptr %p) nounwind uwtable sanitize_memory {
;
; ORIGINS-LABEL: define <16 x i8> @LoadIntrinsic(
; ORIGINS-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
+; ORIGINS-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
-; ORIGINS-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; ORIGINS-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -2147483649
; ORIGINS-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
-; ORIGINS-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
+; ORIGINS-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], 1073741824
; ORIGINS-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -4
; ORIGINS-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT: [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
@@ -104,6 +108,7 @@ define <8 x i16> @Pmulhuw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable saniti
; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[CALL:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A]], <8 x i16> [[B]])
@@ -116,6 +121,7 @@ define <8 x i16> @Pmulhuw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable saniti
; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; ORIGINS-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; ORIGINS-NEXT: [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; ORIGINS-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGINS-NEXT: call void @llvm.donothing()
; ORIGINS-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP3]]
; ORIGINS-NEXT: [[TMP5:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
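The Pmulhuw128 hunks are a useful contrast: the propagation itself is untouched. Simple no-memory vector intrinsics still OR the operand shadows (`[[_MSPROP]] = or <8 x i16> ...`); only the extra overflow-size load renumbers the temporaries. Element-wise, the rule is simply:

  // OR propagation as in the _MSPROP line above: a result lane is poisoned
  // wherever either input lane is. Function name is illustrative.
  #include <cstdint>

  void propagateShadowOr(const uint16_t *sa, const uint16_t *sb,
                         uint16_t *sout, int lanes) {
    for (int i = 0; i < lanes; ++i)
      sout[i] = sa[i] | sb[i];
  }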
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll
similarity index 89%
rename from llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-x86.ll
rename to llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll
index 97ba8699c71ab0..ffad6fb5a2b686 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-i386.ll
@@ -2,12 +2,13 @@
; RUN: opt %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+target triple = "i386-unknown-linux-gnu"
define <4 x float> @test_x86_sse_cmp_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_cmp_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
@@ -26,6 +27,7 @@ define <4 x float> @test_x86_sse_cmp_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_cmp_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -46,6 +48,7 @@ define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comieq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -65,6 +68,7 @@ define i32 @test_x86_sse_comige_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comige_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -84,6 +88,7 @@ define i32 @test_x86_sse_comigt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comigt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -103,6 +108,7 @@ define i32 @test_x86_sse_comile_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comile_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -122,6 +128,7 @@ define i32 @test_x86_sse_comilt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comilt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -141,6 +148,7 @@ define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_comineq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -159,14 +167,15 @@ declare i32 @llvm.x86.sse.comineq.ss(<4 x float>, <4 x float>) nounwind readnone
define i32 @test_x86_sse_cvtss2si(<4 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_sse_cvtss2si(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse.cvtss2si(<4 x float> [[A0:%.*]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[RES]]
@@ -180,14 +189,15 @@ declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
define i32 @test_x86_sse_cvttss2si(<4 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_sse_cvttss2si(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse.cvttss2si(<4 x float> [[A0:%.*]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[RES]]
@@ -201,19 +211,20 @@ declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
define void @test_x86_sse_ldmxcsr(ptr %a0) #0 {
; CHECK-LABEL: @test_x86_sse_ldmxcsr(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -2147483649
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: [[_LDMXCSR:%.*]] = load i32, ptr [[TMP4]], align 1
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i32 [[_LDMXCSR]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: call void @llvm.x86.sse.ldmxcsr(ptr [[A0]])
; CHECK-NEXT: ret void
;
@@ -228,6 +239,7 @@ define <4 x float> @test_x86_sse_max_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_max_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
@@ -244,6 +256,7 @@ define <4 x float> @test_x86_sse_max_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_max_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 4, i32 1, i32 2, i32 3>
@@ -261,6 +274,7 @@ define <4 x float> @test_x86_sse_min_ps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_min_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
@@ -277,6 +291,7 @@ define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_min_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 4, i32 1, i32 2, i32 3>
@@ -293,14 +308,15 @@ declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind read
define i32 @test_x86_sse_movmsk_ps(<4 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_sse_movmsk_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> [[A0:%.*]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[RES]]
@@ -315,6 +331,7 @@ declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rcp_ps(<4 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_sse_rcp_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
@@ -329,6 +346,7 @@ declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rcp_ss(<4 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_sse_rcp_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
@@ -343,6 +361,7 @@ declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rsqrt_ps(<4 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_sse_rsqrt_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
@@ -357,6 +376,7 @@ declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
define <4 x float> @test_x86_sse_rsqrt_ss(<4 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_sse_rsqrt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
@@ -371,17 +391,18 @@ declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
define void @test_x86_sse_stmxcsr(ptr %a0) #0 {
; CHECK-LABEL: @test_x86_sse_stmxcsr(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -2147483649
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: store i32 0, ptr [[TMP4]], align 4
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: call void @llvm.x86.sse.stmxcsr(ptr [[A0]])
; CHECK-NEXT: ret void
;
@@ -395,6 +416,7 @@ define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomieq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -414,6 +436,7 @@ define i32 @test_x86_sse_ucomige_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomige_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -433,6 +456,7 @@ define i32 @test_x86_sse_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomigt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -452,6 +476,7 @@ define i32 @test_x86_sse_ucomile_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomile_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -471,6 +496,7 @@ define i32 @test_x86_sse_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomilt_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -490,6 +516,7 @@ define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse_ucomineq_ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -507,6 +534,7 @@ declare i32 @llvm.x86.sse.ucomineq.ss(<4 x float>, <4 x float>) nounwind readnon
define void @sfence() nounwind {
; CHECK-LABEL: @sfence(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: tail call void @llvm.x86.sse.sfence()
; CHECK-NEXT: ret void
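The recurring change in the rewritten checks above is the shadow-address computation: where the x86-64 mapping XORed the application pointer with 87960930222080 (0x500000000000), the i386 mapping ANDs it with -2147483649, i.e. ~0x80000000, clearing bit 31. A minimal sketch of the mapping these CHECK lines assert (the function name @shadow_for is illustrative, not from the commit):

define ptr @shadow_for(ptr %app) {
  ; i386 mapping: AND with ~0x80000000 (-2147483649) clears bit 31,
  ; turning an application address into its shadow address
  %addr = ptrtoint ptr %app to i64
  %shad = and i64 %addr, -2147483649
  %sptr = inttoptr i64 %shad to ptr
  ret ptr %sptr
}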
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll
similarity index 88%
rename from llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-x86.ll
rename to llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll
index 9d075f7974cd91..3e5db7822b0ef3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-i386.ll
@@ -2,12 +2,13 @@
; RUN: opt %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+target triple = "i386-unknown-linux-gnu"
define <2 x double> @test_x86_sse2_cmp_pd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_cmp_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
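The other pattern added throughout is the prologue load of @__msan_va_arg_overflow_size_tls: with the generic VAArgHelper enabled for i386, every instrumented function in these tests now begins by reading the va-arg overflow-area size from TLS, even when the function takes no varargs (see @sfence above). A minimal sketch of what the new CHECK-NEXT lines match (@f and the plain external TLS declaration are illustrative, not from the commit):

@__msan_va_arg_overflow_size_tls = external thread_local global i64

define void @f() sanitize_memory {
  ; read the overflow-area size published by the caller's va-arg shadow copy
  %ovfl = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
  ret void
}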
@@ -26,6 +27,7 @@ define <2 x double> @test_x86_sse2_cmp_sd(<2 x double> %a0, <2 x double> %a1) #0
; CHECK-LABEL: @test_x86_sse2_cmp_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -46,6 +48,7 @@ define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comieq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -65,6 +68,7 @@ define i32 @test_x86_sse2_comige_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comige_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -84,6 +88,7 @@ define i32 @test_x86_sse2_comigt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comigt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -103,6 +108,7 @@ define i32 @test_x86_sse2_comile_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comile_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -122,6 +128,7 @@ define i32 @test_x86_sse2_comilt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comilt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -141,6 +148,7 @@ define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_comineq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -159,14 +167,15 @@ declare i32 @llvm.x86.sse2.comineq.sd(<2 x double>, <2 x double>) nounwind readn
define <4 x i32> @test_x86_sse2_cvtpd2dq(<2 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_cvtpd2dq(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1:![0-9]+]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5:[0-9]+]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
@@ -180,14 +189,15 @@ declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
define <2 x i64> @test_mm_cvtpd_epi32_zext(<2 x double> %a0) nounwind #0 {
; CHECK-LABEL: @test_mm_cvtpd_epi32_zext(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[CVT:%.*]] = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> [[A0:%.*]])
; CHECK-NEXT: [[RES:%.*]] = shufflevector <4 x i32> [[CVT]], <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
@@ -204,25 +214,26 @@ define <2 x i64> @test_mm_cvtpd_epi32_zext(<2 x double> %a0) nounwind #0 {
define <2 x i64> @test_mm_cvtpd_epi32_zext_load(ptr %p0) nounwind #0 {
; CHECK-LABEL: @test_mm_cvtpd_epi32_zext_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF1]]
-; CHECK: 2:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 3:
+; CHECK: 4:
; CHECK-NEXT: [[A0:%.*]] = load <2 x double>, ptr [[P0:%.*]], align 16
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P0]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP6]], align 16
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <2 x i64> [[_MSLD]] to i128
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT: br i1 [[_MSCMP1]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSCMP1]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[CVT:%.*]] = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> [[A0]])
; CHECK-NEXT: [[RES:%.*]] = shufflevector <4 x i32> [[CVT]], <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
@@ -240,14 +251,15 @@ define <2 x i64> @test_mm_cvtpd_epi32_zext_load(ptr %p0) nounwind #0 {
define <4 x float> @test_x86_sse2_cvtpd2ps(<2 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_cvtpd2ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -260,14 +272,15 @@ declare <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double>) nounwind readnone
define <4 x float> @test_x86_sse2_cvtpd2ps_zext(<2 x double> %a0) nounwind #0 {
; CHECK-LABEL: @test_x86_sse2_cvtpd2ps_zext(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[CVT:%.*]] = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> [[A0:%.*]])
; CHECK-NEXT: [[RES:%.*]] = shufflevector <4 x float> [[CVT]], <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
@@ -281,25 +294,26 @@ define <4 x float> @test_x86_sse2_cvtpd2ps_zext(<2 x double> %a0) nounwind #0 {
define <4 x float> @test_x86_sse2_cvtpd2ps_zext_load(ptr %p0) nounwind #0 {
; CHECK-LABEL: @test_x86_sse2_cvtpd2ps_zext_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF1]]
-; CHECK: 2:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 3:
+; CHECK: 4:
; CHECK-NEXT: [[A0:%.*]] = load <2 x double>, ptr [[P0:%.*]], align 16
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P0]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP6]], align 16
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <2 x i64> [[_MSLD]] to i128
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT: br i1 [[_MSCMP1]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSCMP1]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[CVT:%.*]] = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> [[A0]])
; CHECK-NEXT: [[RES:%.*]] = shufflevector <4 x float> [[CVT]], <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
@@ -314,14 +328,15 @@ define <4 x float> @test_x86_sse2_cvtpd2ps_zext_load(ptr %p0) nounwind #0 {
define <4 x i32> @test_x86_sse2_cvtps2dq(<4 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_cvtps2dq(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
@@ -335,14 +350,15 @@ declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
define i32 @test_x86_sse2_cvtsd2si(<2 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_cvtsd2si(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> [[A0:%.*]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[RES]]
@@ -357,15 +373,16 @@ define <4 x float> @test_x86_sse2_cvtsd2ss(<4 x float> %a0, <2 x double> %a1) #0
; CHECK-LABEL: @test_x86_sse2_cvtsd2ss(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> [[TMP2]], i32 0, i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> [[A0:%.*]], <2 x double> [[A1:%.*]])
; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -380,26 +397,27 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, ptr %p1) #0 {
; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[A1:%.*]] = load <2 x double>, ptr [[P1:%.*]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P1]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP7]], align 16
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i64> [[_MSLD]], i32 0
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i32> [[TMP2]], i32 0, i32 0
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP8]], 0
-; CHECK-NEXT: br i1 [[_MSCMP1]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF1]]
-; CHECK: 10:
+; CHECK-NEXT: br i1 [[_MSCMP1]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
+; CHECK: 11:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 11:
+; CHECK: 12:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> [[A0:%.*]], <2 x double> [[A1]])
; CHECK-NEXT: store <4 x i32> [[TMP9]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -414,26 +432,27 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, ptr %p1
; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load_optsize(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[A1:%.*]] = load <2 x double>, ptr [[P1:%.*]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P1]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP7]], align 16
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i64> [[_MSLD]], i32 0
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i32> [[TMP2]], i32 0, i32 0
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP8]], 0
-; CHECK-NEXT: br i1 [[_MSCMP1]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF1]]
-; CHECK: 10:
+; CHECK-NEXT: br i1 [[_MSCMP1]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
+; CHECK: 11:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 11:
+; CHECK: 12:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> [[A0:%.*]], <2 x double> [[A1]])
; CHECK-NEXT: store <4 x i32> [[TMP9]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -447,14 +466,15 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, ptr %p1
define <4 x i32> @test_x86_sse2_cvttpd2dq(<2 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_cvttpd2dq(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
@@ -468,14 +488,15 @@ declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone
define <2 x i64> @test_mm_cvttpd_epi32_zext(<2 x double> %a0) nounwind #0 {
; CHECK-LABEL: @test_mm_cvttpd_epi32_zext(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[CVT:%.*]] = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> [[A0:%.*]])
; CHECK-NEXT: [[RES:%.*]] = shufflevector <4 x i32> [[CVT]], <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
@@ -492,25 +513,26 @@ define <2 x i64> @test_mm_cvttpd_epi32_zext(<2 x double> %a0) nounwind #0 {
define <2 x i64> @test_mm_cvttpd_epi32_zext_load(ptr %p0) nounwind #0 {
; CHECK-LABEL: @test_mm_cvttpd_epi32_zext_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF1]]
-; CHECK: 2:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP8:%.*]], !prof [[PROF1]]
+; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 3:
+; CHECK: 4:
; CHECK-NEXT: [[A0:%.*]] = load <2 x double>, ptr [[P0:%.*]], align 16
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[P0]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP6]], align 16
; CHECK-NEXT: [[TMP7:%.*]] = bitcast <2 x i64> [[_MSLD]] to i128
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT: br i1 [[_MSCMP1]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
-; CHECK: 8:
+; CHECK-NEXT: br i1 [[_MSCMP1]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 9:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 9:
+; CHECK: 10:
; CHECK-NEXT: [[CVT:%.*]] = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> [[A0]])
; CHECK-NEXT: [[RES:%.*]] = shufflevector <4 x i32> [[CVT]], <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
@@ -528,14 +550,15 @@ define <2 x i64> @test_mm_cvttpd_epi32_zext_load(ptr %p0) nounwind #0 {
define <4 x i32> @test_x86_sse2_cvttps2dq(<4 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_cvttps2dq(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> [[A0:%.*]])
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
@@ -549,14 +572,15 @@ declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone
define i32 @test_x86_sse2_cvttsd2si(<2 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_cvttsd2si(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> [[A0:%.*]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[RES]]
@@ -571,6 +595,7 @@ define <2 x double> @test_x86_sse2_max_pd(<2 x double> %a0, <2 x double> %a1) #0
; CHECK-LABEL: @test_x86_sse2_max_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
@@ -587,6 +612,7 @@ define <2 x double> @test_x86_sse2_max_sd(<2 x double> %a0, <2 x double> %a1) #0
; CHECK-LABEL: @test_x86_sse2_max_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <2 x i32> <i32 2, i32 1>
@@ -604,6 +630,7 @@ define <2 x double> @test_x86_sse2_min_pd(<2 x double> %a0, <2 x double> %a1) #0
; CHECK-LABEL: @test_x86_sse2_min_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
@@ -620,6 +647,7 @@ define <2 x double> @test_x86_sse2_min_sd(<2 x double> %a0, <2 x double> %a1) #0
; CHECK-LABEL: @test_x86_sse2_min_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <2 x i32> <i32 2, i32 1>
@@ -636,14 +664,15 @@ declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind
define i32 @test_x86_sse2_movmsk_pd(<2 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_movmsk_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> [[A0:%.*]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[RES]]
@@ -658,6 +687,7 @@ define <8 x i16> @test_x86_sse2_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_packssdw_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -676,6 +706,7 @@ declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind rea
define <8 x i16> @test_x86_sse2_packssdw_128_fold() #0 {
; CHECK-LABEL: @test_x86_sse2_packssdw_128_fold(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP_VECTOR_PACK:%.*]] = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> zeroinitializer, <4 x i32> zeroinitializer)
; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
@@ -691,6 +722,7 @@ define <16 x i8> @test_x86_sse2_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_packsswb_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16>
@@ -709,6 +741,7 @@ declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind rea
define <16 x i8> @test_x86_sse2_packsswb_128_fold() #0 {
; CHECK-LABEL: @test_x86_sse2_packsswb_128_fold(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> zeroinitializer, <8 x i16> zeroinitializer)
; CHECK-NEXT: [[RES:%.*]] = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> <i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678>, <8 x i16> zeroinitializer)
@@ -724,6 +757,7 @@ define <16 x i8> @test_x86_sse2_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_packuswb_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16>
@@ -742,6 +776,7 @@ declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind rea
define <16 x i8> @test_x86_sse2_packuswb_128_fold() #0 {
; CHECK-LABEL: @test_x86_sse2_packuswb_128_fold(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> zeroinitializer, <8 x i16> zeroinitializer)
; CHECK-NEXT: [[RES:%.*]] = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> <i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678>, <8 x i16> zeroinitializer)
@@ -757,6 +792,7 @@ define <16 x i8> @test_x86_sse2_pavg_b(<16 x i8> %a0, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pavg_b(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> [[A0:%.*]], <16 x i8> [[A1:%.*]])
@@ -773,6 +809,7 @@ define <8 x i16> @test_x86_sse2_pavg_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pavg_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
@@ -789,6 +826,7 @@ define <4 x i32> @test_x86_sse2_pmadd_wd(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pmadd_wd(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to <4 x i32>
@@ -807,14 +845,15 @@ declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnon
define i32 @test_x86_sse2_pmovmskb_128(<16 x i8> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_pmovmskb_128(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> [[A0:%.*]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[RES]]
@@ -829,6 +868,7 @@ define <8 x i16> @test_x86_sse2_pmulh_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pmulh_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
@@ -845,6 +885,7 @@ define <8 x i16> @test_x86_sse2_pmulhu_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_pmulhu_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
@@ -861,6 +902,7 @@ define <2 x i64> @test_x86_sse2_psad_bw(<16 x i8> %a0, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psad_bw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x i64>
@@ -881,6 +923,7 @@ define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psll_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -903,6 +946,7 @@ define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psll_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -925,6 +969,7 @@ define <8 x i16> @test_x86_sse2_psll_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psll_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -946,6 +991,7 @@ declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @test_x86_sse2_pslli_d(<4 x i32> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_pslli_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP2]], zeroinitializer
@@ -962,6 +1008,7 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_pslli_q(<2 x i64> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_pslli_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP2]], zeroinitializer
@@ -978,6 +1025,7 @@ declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone
define <8 x i16> @test_x86_sse2_pslli_w(<8 x i16> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_pslli_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i16> [[TMP2]], zeroinitializer
@@ -995,6 +1043,7 @@ define <4 x i32> @test_x86_sse2_psra_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psra_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1017,6 +1066,7 @@ define <8 x i16> @test_x86_sse2_psra_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psra_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1038,6 +1088,7 @@ declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
define <4 x i32> @test_x86_sse2_psrai_d(<4 x i32> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_psrai_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP2]], zeroinitializer
@@ -1054,6 +1105,7 @@ declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32) nounwind readnone
define <8 x i16> @test_x86_sse2_psrai_w(<8 x i16> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_psrai_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i16> [[TMP2]], zeroinitializer
@@ -1071,6 +1123,7 @@ define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1093,6 +1146,7 @@ define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1115,6 +1169,7 @@ define <8 x i16> @test_x86_sse2_psrl_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
; CHECK-NEXT: [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1137,16 +1192,17 @@ define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, ptr %p) #0 {
; CHECK-LABEL: @test_x86_sse2_psrl_w_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP15:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[A1:%.*]] = load <8 x i16>, ptr [[P:%.*]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i16>, ptr [[TMP7]], align 16
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i16> [[_MSLD]] to i128
@@ -1169,6 +1225,7 @@ define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, ptr %p) #0 {
define <4 x i32> @test_x86_sse2_psrli_d(<4 x i32> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_psrli_d(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP2]], zeroinitializer
@@ -1185,6 +1242,7 @@ declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) nounwind readnone
define <2 x i64> @test_x86_sse2_psrli_q(<2 x i64> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_psrli_q(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP2]], zeroinitializer
@@ -1201,6 +1259,7 @@ declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) nounwind readnone
define <8 x i16> @test_x86_sse2_psrli_w(<8 x i16> %a0) #0 {
; CHECK-LABEL: @test_x86_sse2_psrli_w(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> [[TMP1]], i32 7)
; CHECK-NEXT: [[TMP3:%.*]] = or <8 x i16> [[TMP2]], zeroinitializer
@@ -1218,6 +1277,7 @@ define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomieq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -1237,6 +1297,7 @@ define i32 @test_x86_sse2_ucomige_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomige_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -1256,6 +1317,7 @@ define i32 @test_x86_sse2_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomigt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -1275,6 +1337,7 @@ define i32 @test_x86_sse2_ucomile_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomile_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -1294,6 +1357,7 @@ define i32 @test_x86_sse2_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomilt_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -1313,6 +1377,7 @@ define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
; CHECK-LABEL: @test_x86_sse2_ucomineq_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -1329,6 +1394,7 @@ declare i32 @llvm.x86.sse2.ucomineq.sd(<2 x double>, <2 x double>) nounwind read
define void @test_x86_sse2_pause() #0 {
; CHECK-LABEL: @test_x86_sse2_pause(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: tail call void @llvm.x86.sse2.pause()
; CHECK-NEXT: ret void
@@ -1340,6 +1406,7 @@ declare void @llvm.x86.sse2.pause() nounwind
define void @lfence() nounwind #0 {
; CHECK-LABEL: @lfence(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: tail call void @llvm.x86.sse2.lfence()
; CHECK-NEXT: ret void
@@ -1351,6 +1418,7 @@ declare void @llvm.x86.sse2.lfence() nounwind
define void @mfence() nounwind #0 {
; CHECK-LABEL: @mfence(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: tail call void @llvm.x86.sse2.mfence()
; CHECK-NEXT: ret void
@@ -1363,13 +1431,14 @@ declare void @llvm.x86.sse2.mfence() nounwind
define void @clflush(ptr %p) nounwind #0 {
; CHECK-LABEL: @clflush(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF1]]
-; CHECK: 2:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
+; CHECK: 3:
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
-; CHECK: 3:
+; CHECK: 4:
; CHECK-NEXT: tail call void @llvm.x86.sse2.clflush(ptr [[P:%.*]])
; CHECK-NEXT: ret void
;
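A note on the change that repeats through all of these intrinsic-test hunks: the shadow-address computation. With the old x86_64 triple the pass XORed the application pointer with 87960930222080 (0x500000000000); with the i386 triple it ANDs the pointer with -2147483649, i.e. ~0x80000000, clearing bit 31. A minimal sketch of the two mappings, with the constants read directly off the CHECK lines above (the function names are illustrative, not identifiers from MemorySanitizer.cpp):

    #include <cstdint>

    // x86_64 Linux: shadow = addr ^ XorMask.
    uint64_t shadowX86_64(uint64_t Addr) {
      return Addr ^ 0x500000000000ULL; // xor i64 %p, 87960930222080
    }

    // i386 Linux: shadow = addr & ~AndMask, i.e. clear bit 31.
    uint64_t shadowI386(uint64_t Addr) {
      return Addr & ~0x80000000ULL;    // and i64 %p, -2147483649
    }

The renumbered FileCheck temporaries ([[TMP1]] becoming [[TMP4]] and so on) fall out of the extra prologue load of @__msan_va_arg_overflow_size_tls; the shadow logic of each test is otherwise unchanged.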
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll
similarity index 83%
rename from llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-x86.ll
rename to llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll
index f5e8452fd0d585..a3bda12e395e6c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-i386.ll
@@ -2,25 +2,26 @@
; RUN: opt %s -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+target triple = "i386-unknown-linux-gnu"
define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
; CHECK-LABEL: @test_x86_sse41_blendvpd(
-; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x double> [[A2:%.*]] to <2 x i64>
-; CHECK-NEXT: [[TMP5:%.*]] = ashr <2 x i64> [[TMP4]], splat (i64 63)
+; CHECK-NEXT: [[TMP13:%.*]] = bitcast <2 x double> [[A2:%.*]] to <2 x i64>
+; CHECK-NEXT: [[TMP5:%.*]] = ashr <2 x i64> [[TMP13]], splat (i64 63)
; CHECK-NEXT: [[TMP6:%.*]] = trunc <2 x i64> [[TMP5]] to <2 x i1>
-; CHECK-NEXT: [[TMP7:%.*]] = ashr <2 x i64> [[TMP1]], splat (i64 63)
+; CHECK-NEXT: [[TMP7:%.*]] = ashr <2 x i64> [[TMP4]], splat (i64 63)
; CHECK-NEXT: [[TMP8:%.*]] = trunc <2 x i64> [[TMP7]] to <2 x i1>
-; CHECK-NEXT: [[TMP9:%.*]] = select <2 x i1> [[TMP6]], <2 x i64> [[TMP2]], <2 x i64> [[TMP3]]
+; CHECK-NEXT: [[TMP9:%.*]] = select <2 x i1> [[TMP6]], <2 x i64> [[TMP2]], <2 x i64> [[TMP12]]
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <2 x double> [[A1:%.*]] to <2 x i64>
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <2 x double> [[A0:%.*]] to <2 x i64>
-; CHECK-NEXT: [[TMP12:%.*]] = xor <2 x i64> [[TMP10]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = or <2 x i64> [[TMP12]], [[TMP2]]
-; CHECK-NEXT: [[TMP14:%.*]] = or <2 x i64> [[TMP13]], [[TMP3]]
+; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i64> [[TMP10]], [[TMP11]]
+; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i64> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP14:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP12]]
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <2 x i1> [[TMP8]], <2 x i64> [[TMP14]], <2 x i64> [[TMP9]]
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> [[A0]], <2 x double> [[A1]], <2 x double> [[A2]])
; CHECK-NEXT: store <2 x i64> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
@@ -34,21 +35,22 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d
define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
; CHECK-LABEL: @test_x86_sse41_blendvps(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x float> [[A2:%.*]] to <4 x i32>
-; CHECK-NEXT: [[TMP5:%.*]] = ashr <4 x i32> [[TMP4]], splat (i32 31)
+; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x float> [[A2:%.*]] to <4 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = ashr <4 x i32> [[TMP13]], splat (i32 31)
; CHECK-NEXT: [[TMP6:%.*]] = trunc <4 x i32> [[TMP5]] to <4 x i1>
-; CHECK-NEXT: [[TMP7:%.*]] = ashr <4 x i32> [[TMP1]], splat (i32 31)
+; CHECK-NEXT: [[TMP7:%.*]] = ashr <4 x i32> [[TMP4]], splat (i32 31)
; CHECK-NEXT: [[TMP8:%.*]] = trunc <4 x i32> [[TMP7]] to <4 x i1>
-; CHECK-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> [[TMP2]], <4 x i32> [[TMP3]]
+; CHECK-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> [[TMP2]], <4 x i32> [[TMP12]]
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <4 x float> [[A1:%.*]] to <4 x i32>
; CHECK-NEXT: [[TMP11:%.*]] = bitcast <4 x float> [[A0:%.*]] to <4 x i32>
-; CHECK-NEXT: [[TMP12:%.*]] = xor <4 x i32> [[TMP10]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = or <4 x i32> [[TMP12]], [[TMP2]]
-; CHECK-NEXT: [[TMP14:%.*]] = or <4 x i32> [[TMP13]], [[TMP3]]
+; CHECK-NEXT: [[TMP3:%.*]] = xor <4 x i32> [[TMP10]], [[TMP11]]
+; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP14:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP12]]
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <4 x i1> [[TMP8]], <4 x i32> [[TMP14]], <4 x i32> [[TMP9]]
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> [[A0]], <4 x float> [[A1]], <4 x float> [[A2]])
; CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
@@ -64,6 +66,7 @@ define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) #0
; CHECK-LABEL: @test_x86_sse41_dppd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = select <2 x i1> <i1 false, i1 true>, <2 x i64> [[TMP3]], <2 x i64> zeroinitializer
@@ -85,6 +88,7 @@ define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_dpps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> [[TMP3]], <4 x i32> zeroinitializer
@@ -106,17 +110,18 @@ define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) #0
; CHECK-LABEL: @test_x86_sse41_insertps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1:![0-9]+]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 17)
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -132,17 +137,18 @@ define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_mpsadbw(
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF1]]
-; CHECK: 5:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF1]]
+; CHECK: 6:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 6:
+; CHECK: 7:
; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> [[A0:%.*]], <16 x i8> [[A1:%.*]], i8 7)
; CHECK-NEXT: store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i16> [[RES]]
@@ -156,16 +162,17 @@ define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_mpsadbw_load_op0(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP10:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[A0:%.*]] = load <16 x i8>, ptr [[PTR:%.*]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[PTR]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 16
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[_MSLD]] to i128
@@ -173,11 +180,11 @@ define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) #0 {
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i128 [[TMP9]], 0
; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP1]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF1]]
-; CHECK: 10:
+; CHECK-NEXT: br i1 [[_MSOR]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF1]]
+; CHECK: 11:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 11:
+; CHECK: 12:
; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> [[A0]], <16 x i8> [[A1:%.*]], i8 7)
; CHECK-NEXT: store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i16> [[RES]]
@@ -191,6 +198,7 @@ define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_packusdw(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -209,6 +217,7 @@ declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readno
define <8 x i16> @test_x86_sse41_packusdw_fold() #0 {
; CHECK-LABEL: @test_x86_sse41_packusdw_fold(
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSPROP_VECTOR_PACK:%.*]] = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> zeroinitializer, <4 x i32> zeroinitializer)
; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
@@ -222,18 +231,19 @@ define <8 x i16> @test_x86_sse41_packusdw_fold() #0 {
define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) #0 {
; CHECK-LABEL: @test_x86_sse41_pblendvb(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = ashr <16 x i8> [[A2:%.*]], splat (i8 7)
-; CHECK-NEXT: [[TMP5:%.*]] = trunc <16 x i8> [[TMP4]] to <16 x i1>
-; CHECK-NEXT: [[TMP6:%.*]] = ashr <16 x i8> [[TMP1]], splat (i8 7)
+; CHECK-NEXT: [[TMP10:%.*]] = ashr <16 x i8> [[A2:%.*]], splat (i8 7)
+; CHECK-NEXT: [[TMP5:%.*]] = trunc <16 x i8> [[TMP10]] to <16 x i1>
+; CHECK-NEXT: [[TMP6:%.*]] = ashr <16 x i8> [[TMP4]], splat (i8 7)
; CHECK-NEXT: [[TMP7:%.*]] = trunc <16 x i8> [[TMP6]] to <16 x i1>
-; CHECK-NEXT: [[TMP8:%.*]] = select <16 x i1> [[TMP5]], <16 x i8> [[TMP2]], <16 x i8> [[TMP3]]
-; CHECK-NEXT: [[TMP9:%.*]] = xor <16 x i8> [[A1:%.*]], [[A0:%.*]]
-; CHECK-NEXT: [[TMP10:%.*]] = or <16 x i8> [[TMP9]], [[TMP2]]
-; CHECK-NEXT: [[TMP11:%.*]] = or <16 x i8> [[TMP10]], [[TMP3]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <16 x i1> [[TMP5]], <16 x i8> [[TMP2]], <16 x i8> [[TMP9]]
+; CHECK-NEXT: [[TMP3:%.*]] = xor <16 x i8> [[A1:%.*]], [[A0:%.*]]
+; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i8> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = or <16 x i8> [[_MSPROP]], [[TMP9]]
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <16 x i1> [[TMP7]], <16 x i8> [[TMP11]], <16 x i8> [[TMP8]]
; CHECK-NEXT: [[RES:%.*]] = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A0]], <16 x i8> [[A1]], <16 x i8> [[A2]])
; CHECK-NEXT: store <16 x i8> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
@@ -248,6 +258,7 @@ declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) noun
define <8 x i16> @test_x86_sse41_phminposuw(<8 x i16> %a0) #0 {
; CHECK-LABEL: @test_x86_sse41_phminposuw(
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[RES:%.*]] = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> [[A0:%.*]])
; CHECK-NEXT: store <8 x i16> [[TMP1]], ptr @__msan_retval_tls, align 8
@@ -263,6 +274,7 @@ define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_ptestc(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -282,6 +294,7 @@ define i32 @test_x86_sse41_ptestnzc(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_ptestnzc(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -301,6 +314,7 @@ define i32 @test_x86_sse41_ptestz(<2 x i64> %a0, <2 x i64> %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_ptestz(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
@@ -319,14 +333,15 @@ declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x double> @test_x86_sse41_round_pd(<2 x double> %a0) #0 {
; CHECK-LABEL: @test_x86_sse41_round_pd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> [[A0:%.*]], i32 7)
; CHECK-NEXT: store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <2 x double> [[RES]]
@@ -340,14 +355,15 @@ declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readno
define <4 x float> @test_x86_sse41_round_ps(<4 x float> %a0) #0 {
; CHECK-LABEL: @test_x86_sse41_round_ps(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP5:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[RES:%.*]] = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> [[A0:%.*]], i32 7)
; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x float> [[RES]]
@@ -362,6 +378,7 @@ define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1)
; CHECK-LABEL: @test_x86_sse41_round_sd(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP2]], <2 x i32> <i32 2, i32 1>
; CHECK-NEXT: [[RES:%.*]] = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], i32 7)
@@ -378,16 +395,17 @@ define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, ptr %a1) #0
; CHECK-LABEL: @test_x86_sse41_round_sd_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[A1B:%.*]] = load <2 x double>, ptr [[A1:%.*]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A1]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP7]], align 16
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> [[_MSLD]], <2 x i32> <i32 2, i32 1>
@@ -405,16 +423,17 @@ define <4 x float> @test_x86_sse41_round_ss_load(<4 x float> %a0, ptr %a1) #0 {
; CHECK-LABEL: @test_x86_sse41_round_ss_load(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF1]]
-; CHECK: 3:
+; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP4:%.*]], label [[TMP9:%.*]], !prof [[PROF1]]
+; CHECK: 4:
; CHECK-NEXT: call void @__msan_warning_noreturn()
; CHECK-NEXT: unreachable
-; CHECK: 4:
+; CHECK: 5:
; CHECK-NEXT: [[A1B:%.*]] = load <4 x float>, ptr [[A1:%.*]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[A1]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP7]], align 16
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> [[_MSLD]], <4 x i32> <i32 4, i32 1, i32 2, i32 3>
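Every updated function in these intrinsic tests, variadic or not, now opens with a load of @__msan_va_arg_overflow_size_tls: the 32-bit va-arg handling reads the overflow size unconditionally in the prologue rather than only in functions that use va_start. The TLS protocol itself is easiest to see from the caller side. Below is a simplified model, assuming the 8-byte slot granularity and the 800-byte TLS area that the vararg tests further down exhibit; the thread-local names mirror the real globals, but this is an illustration, not the runtime's code:

    #include <cstdint>
    #include <cstring>

    constexpr uint64_t kVAArgTLSSize = 800; // 100 slots of 8 bytes each
    thread_local uint8_t  va_arg_tls[kVAArgTLSSize];
    thread_local uint64_t va_arg_overflow_size_tls;

    // Caller side: one 8-byte shadow slot per variadic argument, so an
    // i32's shadow still advances the cursor by 8 (offsets 0, 8, 16, ...).
    void storeVarArgShadow(const void *Shadow, uint64_t Size, uint64_t &Off) {
      if (Off + 8 <= kVAArgTLSSize)
        std::memcpy(va_arg_tls + Off, Shadow, Size);
      Off += 8;
      va_arg_overflow_size_tls = Off; // total bytes, no 176-byte bias
    }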
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll
index 11c8da868bb6a1..7bc9cf3b8c5826 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll
@@ -5,11 +5,12 @@
; passed to a variadic function.
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+target triple = "i386-unknown-linux-gnu"
define dso_local i64 @many_args() {
; CHECK-LABEL: define dso_local i64 @many_args() {
; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -111,11 +112,28 @@ define dso_local i64 @many_args() {
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 776) to ptr), align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 784) to ptr), align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 792) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_tls, align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 56) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 88) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 104) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 112) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 120) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 144) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 152) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 160) to ptr), align 8
+; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 168) to ptr), align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 184) to ptr), align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
@@ -194,7 +212,7 @@ define dso_local i64 @many_args() {
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 776) to ptr), align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 784) to ptr), align 8
; CHECK-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792) to ptr), align 8
-; CHECK-NEXT: store i64 1000, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i64 1040, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[RET:%.*]] = call i64 (i64, ...) @sum(i64 120, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1)
; CHECK-NEXT: [[_MSRET:%.*]] = load i64, ptr @__msan_retval_tls, align 8
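The size bump from 1000 to 1040 in this test is the slot accounting made visible: the call passes 130 variadic i64s, and the generic 32-bit helper records one 8-byte slot for each (130 * 8 = 1040), whereas the AMD64 helper counted only the 125 that spill past the register save area (the fixed argument plus the first five varargs travel in the six GP registers, leaving 125 * 8 = 1000). The newly checked stores at @__msan_va_arg_tls offsets 0 and 48 through 168 are the slots that previously corresponded to register-passed arguments. A quick check of the arithmetic:

    static_assert(130 * 8 == 1040, "one 8-byte shadow slot per vararg");
    static_assert((130 - 5) * 8 == 1000, "old count skipped register varargs");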
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg.ll
index 9d86850bd52154..133d711764c718 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg.ll
@@ -9,19 +9,33 @@ target triple = "i386-unknown-linux-gnu"
define void @VaStart(ptr %s, ...) {
; CHECK-LABEL: define void @VaStart(
; CHECK-SAME: ptr [[S:%.*]], ...) {
-; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 4
+; CHECK-NEXT: [[TMP4:%.*]] = alloca i8, i64 [[TMP6]], align 8
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP4]], i8 0, i64 [[TMP6]], i1 false)
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP6]], i64 800)
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP4]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 4
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[VL]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], 2147483647
; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i32 [[TMP1]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 4 [[TMP2]], i8 0, i32 4, i1 false)
+; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[VL]] to i32
+; CHECK-NEXT: [[TMP10:%.*]] = and i32 [[TMP9]], 2147483647
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i32 [[TMP10]] to ptr
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP11]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
+; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[VL]] to i32
+; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i32 [[TMP12]] to ptr
+; CHECK-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i32
+; CHECK-NEXT: [[TMP16:%.*]] = and i32 [[TMP15]], 2147483647
+; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i32 [[TMP16]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP17]], ptr align 4 [[TMP4]], i64 [[TMP6]], i1 false)
; CHECK-NEXT: ret void
;
; KERNEL-LABEL: define void @VaStart(
; KERNEL-SAME: ptr [[S:%.*]], ...) {
-; KERNEL-NEXT: [[ENTRY:.*:]]
; KERNEL-NEXT: [[TMP0:%.*]] = call ptr @__msan_get_context_state()
; KERNEL-NEXT: [[PARAM_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 0
; KERNEL-NEXT: [[RETVAL_SHADOW:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 1
@@ -30,13 +44,28 @@ define void @VaStart(ptr %s, ...) {
; KERNEL-NEXT: [[VA_ARG_OVERFLOW_SIZE:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 4
; KERNEL-NEXT: [[PARAM_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 5
; KERNEL-NEXT: [[RETVAL_ORIGIN:%.*]] = getelementptr { [100 x i64], [100 x i64], [100 x i64], [100 x i64], i64, [200 x i32], i32, i32 }, ptr [[TMP0]], i32 0, i32 6
+; KERNEL-NEXT: [[TMP2:%.*]] = load i64, ptr [[VA_ARG_OVERFLOW_SIZE]], align 4
+; KERNEL-NEXT: [[TMP3:%.*]] = alloca i8, i64 [[TMP2]], align 8
+; KERNEL-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 0, i64 [[TMP2]], i1 false)
+; KERNEL-NEXT: [[TMP4:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 800)
+; KERNEL-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 [[VA_ARG_SHADOW]], i64 [[TMP4]], i1 false)
; KERNEL-NEXT: call void @llvm.donothing()
; KERNEL-NEXT: [[VL:%.*]] = alloca ptr, align 4
; KERNEL-NEXT: call void @__msan_unpoison_alloca(ptr [[VL]], i32 4)
+; KERNEL-NEXT: [[TMP7:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[VL]])
+; KERNEL-NEXT: [[TMP8:%.*]] = extractvalue { ptr, ptr } [[TMP7]], 0
+; KERNEL-NEXT: [[TMP9:%.*]] = extractvalue { ptr, ptr } [[TMP7]], 1
+; KERNEL-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP8]], i8 0, i64 4, i1 false)
; KERNEL-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
+; KERNEL-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[VL]] to i32
+; KERNEL-NEXT: [[TMP11:%.*]] = inttoptr i32 [[TMP10]] to ptr
+; KERNEL-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 4
+; KERNEL-NEXT: [[TMP13:%.*]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr [[TMP12]])
+; KERNEL-NEXT: [[TMP14:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 0
+; KERNEL-NEXT: [[TMP15:%.*]] = extractvalue { ptr, ptr } [[TMP13]], 1
+; KERNEL-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP14]], ptr align 4 [[TMP3]], i64 [[TMP2]], i1 false)
; KERNEL-NEXT: ret void
;
-entry:
%vl = alloca ptr, align 4
call void @llvm.va_start(ptr %vl)
ret void
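The new CHECK body above is the callee half of the protocol: the prologue loads the overflow size, allocates a private shadow buffer of that size, zeroes it, and copies in at most 800 bytes from @__msan_va_arg_tls (the llvm.umin cap); after va_start, that buffer is copied over the shadow of the area the va_list points at. A compact model of the sequence, reusing the illustrative thread-local names from the sketch above:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    extern thread_local uint8_t  va_arg_tls[800];
    extern thread_local uint64_t va_arg_overflow_size_tls;

    std::vector<uint8_t> snapshotVarArgShadow() {
      uint64_t N = va_arg_overflow_size_tls;   // prologue load
      std::vector<uint8_t> Buf(N, 0);          // alloca + memset 0
      std::memcpy(Buf.data(), va_arg_tls,
                  std::min<uint64_t>(N, 800)); // llvm.umin cap
      return Buf; // copied over the va_list's shadow after va_start
    }

The KERNEL variant performs the same dance but resolves shadow pointers through __msan_metadata_ptr_for_store_1 instead of the inline mapping.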
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
index bcc9e6788d0ea5..b27ef5ddc93bff 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
@@ -7,7 +7,7 @@
; Test that shadow and origin are stored for variadic function params.
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+target triple = "i386-unknown-linux-gnu"
%struct.__va_list_tag = type { i32, i32, ptr, ptr }
@@ -15,15 +15,16 @@ define dso_local i32 @test(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
; CHECK-LABEL: define dso_local i32 @test(
; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) local_unnamed_addr {
; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (i32, ...) @sum(i32 3, i32 [[A]], i32 [[B]], i32 [[C]])
; CHECK-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
@@ -33,18 +34,16 @@ define dso_local i32 @test(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
; ORIGIN-LABEL: define dso_local i32 @test(
; ORIGIN-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) local_unnamed_addr {
; ORIGIN-NEXT: [[ENTRY:.*:]]
+; ORIGIN-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; ORIGIN-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8
; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8
; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8
-; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; ORIGIN-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8
-; ORIGIN-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; ORIGIN-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: [[CALL:%.*]] = tail call i32 (i32, ...) @sum(i32 3, i32 [[A]], i32 [[B]], i32 [[C]])
; ORIGIN-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
@@ -56,18 +55,16 @@ define dso_local i32 @test(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
; ORIGIN2-LABEL: define dso_local i32 @test(
; ORIGIN2-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) local_unnamed_addr {
; ORIGIN2-NEXT: [[ENTRY:.*:]]
+; ORIGIN2-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN2-NEXT: call void @llvm.donothing()
; ORIGIN2-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; ORIGIN2-NEXT: store i32 0, ptr @__msan_va_arg_tls, align 8
; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
-; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8
; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8
-; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; ORIGIN2-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8
-; ORIGIN2-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; ORIGIN2-NEXT: store i64 24, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN2-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; ORIGIN2-NEXT: [[CALL:%.*]] = tail call i32 (i32, ...) @sum(i32 3, i32 [[A]], i32 [[B]], i32 [[C]])
; ORIGIN2-NEXT: [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
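Two things stand out in the @test hunks above. First, the caller now writes a shadow slot at @__msan_va_arg_tls offset 0 and records the true overflow size (24 = three i32 varargs at 8 bytes each) instead of 0. Second, the stores to @__msan_va_arg_origin_tls are gone in the ORIGIN configurations: the generic 32-bit helper forwards va-arg shadow but not va-arg origins, and the matching origin-buffer alloca and memcpys disappear from @sum below. For the origin mappings that remain, the origin shadow is reached by adding 1073741824 (0x40000000) on top of the i386 AND-mask mapping, 4-byte aligned, per the "add i64 ..., 1073741824" / "and i64 ..., -4" pairs in the ORIGIN checks. Extending the earlier illustrative model:

    #include <cstdint>

    // Origin address for the i386 Linux mapping; constants read off the
    // ORIGIN check lines, function name illustrative only.
    uint64_t originI386(uint64_t Addr) {
      uint64_t Shadow = Addr & ~0x80000000ULL; // and i64 %p, -2147483649
      return (Shadow + 0x40000000ULL) & ~3ULL; // OriginBase, 4-byte aligned
    }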
@@ -85,8 +82,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; CHECK-LABEL: define dso_local i32 @sum(
; CHECK-SAME: i32 [[N:%.*]], ...) local_unnamed_addr {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -94,32 +90,22 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 0, i64 24, i1 false)
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP25:%.*]] = xor i32 [[N]], -2147483648
; CHECK-NEXT: [[TMP26:%.*]] = and i32 [[TMP25]], -1
; CHECK-NEXT: [[TMP27:%.*]] = or i32 [[TMP25]], 0
@@ -150,7 +136,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; CHECK-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[REG_SAVE_AREA]], i64 [[TMP37]]
; CHECK-NEXT: [[TMP39:%.*]] = add i32 [[GP_OFFSET]], 8
; CHECK-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP41:%.*]] = xor i64 [[TMP40]], 87960930222080
+; CHECK-NEXT: [[TMP41:%.*]] = and i64 [[TMP40]], -2147483649
; CHECK-NEXT: [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
; CHECK-NEXT: store i32 0, ptr [[TMP42]], align 16
; CHECK-NEXT: store i32 [[TMP39]], ptr [[ARGS]], align 16
@@ -159,7 +145,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; CHECK-NEXT: [[OVERFLOW_ARG_AREA:%.*]] = load ptr, ptr [[OVERFLOW_ARG_AREA_P]], align 8
; CHECK-NEXT: [[OVERFLOW_ARG_AREA_NEXT:%.*]] = getelementptr i8, ptr [[OVERFLOW_ARG_AREA]], i64 8
; CHECK-NEXT: [[TMP43:%.*]] = ptrtoint ptr [[OVERFLOW_ARG_AREA_P]] to i64
-; CHECK-NEXT: [[TMP44:%.*]] = xor i64 [[TMP43]], 87960930222080
+; CHECK-NEXT: [[TMP44:%.*]] = and i64 [[TMP43]], -2147483649
; CHECK-NEXT: [[TMP45:%.*]] = inttoptr i64 [[TMP44]] to ptr
; CHECK-NEXT: store i64 0, ptr [[TMP45]], align 8
; CHECK-NEXT: store ptr [[OVERFLOW_ARG_AREA_NEXT]], ptr [[OVERFLOW_ARG_AREA_P]], align 8
@@ -186,55 +172,37 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN-LABEL: define dso_local i32 @sum(
; ORIGIN-SAME: i32 [[N:%.*]], ...) local_unnamed_addr {
; ORIGIN-NEXT: [[ENTRY:.*]]:
-; ORIGIN-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; ORIGIN-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; ORIGIN-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; ORIGIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
-; ORIGIN-NEXT: [[TMP4:%.*]] = alloca i8, i64 [[TMP1]], align 8
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP4]], ptr align 8 @__msan_va_arg_origin_tls, i64 [[TMP3]], i1 false)
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; ORIGIN-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; ORIGIN-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; ORIGIN-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; ORIGIN-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
-; ORIGIN-NEXT: [[TMP8:%.*]] = add i64 [[TMP6]], 17592186044416
+; ORIGIN-NEXT: [[TMP8:%.*]] = add i64 [[TMP6]], 1073741824
; ORIGIN-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4
; ORIGIN-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP7]], i8 0, i64 24, i1 false)
; ORIGIN-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
-; ORIGIN-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; ORIGIN-NEXT: [[TMP12:%.*]] = xor i64 [[TMP11]], 87960930222080
-; ORIGIN-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
-; ORIGIN-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], 17592186044416
-; ORIGIN-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
-; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP13]], i8 0, i64 24, i1 false)
+; ORIGIN-NEXT: [[TMP23:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; ORIGIN-NEXT: [[TMP11:%.*]] = and i64 [[TMP23]], -2147483649
+; ORIGIN-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; ORIGIN-NEXT: [[TMP13:%.*]] = add i64 [[TMP11]], 1073741824
+; ORIGIN-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP12]], i8 0, i64 4, i1 false)
; ORIGIN-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; ORIGIN-NEXT: [[TMP16:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; ORIGIN-NEXT: [[TMP17:%.*]] = add i64 [[TMP16]], 16
-; ORIGIN-NEXT: [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-; ORIGIN-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8
-; ORIGIN-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP19]] to i64
-; ORIGIN-NEXT: [[TMP21:%.*]] = xor i64 [[TMP20]], 87960930222080
+; ORIGIN-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; ORIGIN-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; ORIGIN-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
+; ORIGIN-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64
+; ORIGIN-NEXT: [[TMP19:%.*]] = and i64 [[TMP18]], -2147483649
+; ORIGIN-NEXT: [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr
+; ORIGIN-NEXT: [[TMP21:%.*]] = add i64 [[TMP19]], 1073741824
; ORIGIN-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr
-; ORIGIN-NEXT: [[TMP23:%.*]] = add i64 [[TMP21]], 17592186044416
-; ORIGIN-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP22]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP24]], ptr align 16 [[TMP4]], i64 176, i1 false)
-; ORIGIN-NEXT: [[TMP25:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; ORIGIN-NEXT: [[TMP26:%.*]] = add i64 [[TMP25]], 8
-; ORIGIN-NEXT: [[TMP27:%.*]] = inttoptr i64 [[TMP26]] to ptr
-; ORIGIN-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP27]], align 8
-; ORIGIN-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP28]] to i64
-; ORIGIN-NEXT: [[TMP30:%.*]] = xor i64 [[TMP29]], 87960930222080
-; ORIGIN-NEXT: [[TMP31:%.*]] = inttoptr i64 [[TMP30]] to ptr
-; ORIGIN-NEXT: [[TMP32:%.*]] = add i64 [[TMP30]], 17592186044416
-; ORIGIN-NEXT: [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr
-; ORIGIN-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP31]], ptr align 16 [[TMP34]], i64 [[TMP0]], i1 false)
-; ORIGIN-NEXT: [[TMP35:%.*]] = getelementptr i8, ptr [[TMP4]], i32 176
-; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP33]], ptr align 16 [[TMP35]], i64 [[TMP0]], i1 false)
+; ORIGIN-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP20]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; ORIGIN-NEXT: [[TMP36:%.*]] = xor i32 [[N]], -2147483648
; ORIGIN-NEXT: [[TMP37:%.*]] = and i32 [[TMP36]], -1
; ORIGIN-NEXT: [[TMP38:%.*]] = or i32 [[TMP36]], 0
@@ -265,9 +233,9 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN-NEXT: [[TMP49:%.*]] = getelementptr i8, ptr [[REG_SAVE_AREA]], i64 [[TMP48]]
; ORIGIN-NEXT: [[TMP50:%.*]] = add i32 [[GP_OFFSET]], 8
; ORIGIN-NEXT: [[TMP51:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; ORIGIN-NEXT: [[TMP52:%.*]] = xor i64 [[TMP51]], 87960930222080
+; ORIGIN-NEXT: [[TMP52:%.*]] = and i64 [[TMP51]], -2147483649
; ORIGIN-NEXT: [[TMP53:%.*]] = inttoptr i64 [[TMP52]] to ptr
-; ORIGIN-NEXT: [[TMP54:%.*]] = add i64 [[TMP52]], 17592186044416
+; ORIGIN-NEXT: [[TMP54:%.*]] = add i64 [[TMP52]], 1073741824
; ORIGIN-NEXT: [[TMP55:%.*]] = inttoptr i64 [[TMP54]] to ptr
; ORIGIN-NEXT: store i32 0, ptr [[TMP53]], align 16
; ORIGIN-NEXT: store i32 [[TMP50]], ptr [[ARGS]], align 16
@@ -276,9 +244,9 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN-NEXT: [[OVERFLOW_ARG_AREA:%.*]] = load ptr, ptr [[OVERFLOW_ARG_AREA_P]], align 8
; ORIGIN-NEXT: [[OVERFLOW_ARG_AREA_NEXT:%.*]] = getelementptr i8, ptr [[OVERFLOW_ARG_AREA]], i64 8
; ORIGIN-NEXT: [[TMP56:%.*]] = ptrtoint ptr [[OVERFLOW_ARG_AREA_P]] to i64
-; ORIGIN-NEXT: [[TMP57:%.*]] = xor i64 [[TMP56]], 87960930222080
+; ORIGIN-NEXT: [[TMP57:%.*]] = and i64 [[TMP56]], -2147483649
; ORIGIN-NEXT: [[TMP58:%.*]] = inttoptr i64 [[TMP57]] to ptr
-; ORIGIN-NEXT: [[TMP59:%.*]] = add i64 [[TMP57]], 17592186044416
+; ORIGIN-NEXT: [[TMP59:%.*]] = add i64 [[TMP57]], 1073741824
; ORIGIN-NEXT: [[TMP60:%.*]] = inttoptr i64 [[TMP59]] to ptr
; ORIGIN-NEXT: store i64 0, ptr [[TMP58]], align 8
; ORIGIN-NEXT: store ptr [[OVERFLOW_ARG_AREA_NEXT]], ptr [[OVERFLOW_ARG_AREA_P]], align 8
@@ -306,55 +274,37 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN2-LABEL: define dso_local i32 @sum(
; ORIGIN2-SAME: i32 [[N:%.*]], ...) local_unnamed_addr {
; ORIGIN2-NEXT: [[ENTRY:.*]]:
-; ORIGIN2-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; ORIGIN2-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; ORIGIN2-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN2-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; ORIGIN2-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; ORIGIN2-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
; ORIGIN2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
-; ORIGIN2-NEXT: [[TMP4:%.*]] = alloca i8, i64 [[TMP1]], align 8
-; ORIGIN2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP4]], ptr align 8 @__msan_va_arg_origin_tls, i64 [[TMP3]], i1 false)
; ORIGIN2-NEXT: call void @llvm.donothing()
; ORIGIN2-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; ORIGIN2-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; ORIGIN2-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; ORIGIN2-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; ORIGIN2-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
-; ORIGIN2-NEXT: [[TMP8:%.*]] = add i64 [[TMP6]], 17592186044416
+; ORIGIN2-NEXT: [[TMP8:%.*]] = add i64 [[TMP6]], 1073741824
; ORIGIN2-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4
; ORIGIN2-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; ORIGIN2-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP7]], i8 0, i64 24, i1 false)
; ORIGIN2-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
-; ORIGIN2-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; ORIGIN2-NEXT: [[TMP12:%.*]] = xor i64 [[TMP11]], 87960930222080
-; ORIGIN2-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
-; ORIGIN2-NEXT: [[TMP14:%.*]] = add i64 [[TMP12]], 17592186044416
-; ORIGIN2-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
-; ORIGIN2-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP13]], i8 0, i64 24, i1 false)
+; ORIGIN2-NEXT: [[TMP23:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; ORIGIN2-NEXT: [[TMP11:%.*]] = and i64 [[TMP23]], -2147483649
+; ORIGIN2-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; ORIGIN2-NEXT: [[TMP13:%.*]] = add i64 [[TMP11]], 1073741824
+; ORIGIN2-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; ORIGIN2-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP12]], i8 0, i64 4, i1 false)
; ORIGIN2-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
-; ORIGIN2-NEXT: [[TMP16:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; ORIGIN2-NEXT: [[TMP17:%.*]] = add i64 [[TMP16]], 16
-; ORIGIN2-NEXT: [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-; ORIGIN2-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8
-; ORIGIN2-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP19]] to i64
-; ORIGIN2-NEXT: [[TMP21:%.*]] = xor i64 [[TMP20]], 87960930222080
+; ORIGIN2-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; ORIGIN2-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; ORIGIN2-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
+; ORIGIN2-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64
+; ORIGIN2-NEXT: [[TMP19:%.*]] = and i64 [[TMP18]], -2147483649
+; ORIGIN2-NEXT: [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr
+; ORIGIN2-NEXT: [[TMP21:%.*]] = add i64 [[TMP19]], 1073741824
; ORIGIN2-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr
-; ORIGIN2-NEXT: [[TMP23:%.*]] = add i64 [[TMP21]], 17592186044416
-; ORIGIN2-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-; ORIGIN2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP22]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; ORIGIN2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP24]], ptr align 16 [[TMP4]], i64 176, i1 false)
-; ORIGIN2-NEXT: [[TMP25:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; ORIGIN2-NEXT: [[TMP26:%.*]] = add i64 [[TMP25]], 8
-; ORIGIN2-NEXT: [[TMP27:%.*]] = inttoptr i64 [[TMP26]] to ptr
-; ORIGIN2-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP27]], align 8
-; ORIGIN2-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP28]] to i64
-; ORIGIN2-NEXT: [[TMP30:%.*]] = xor i64 [[TMP29]], 87960930222080
-; ORIGIN2-NEXT: [[TMP31:%.*]] = inttoptr i64 [[TMP30]] to ptr
-; ORIGIN2-NEXT: [[TMP32:%.*]] = add i64 [[TMP30]], 17592186044416
-; ORIGIN2-NEXT: [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr
-; ORIGIN2-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; ORIGIN2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP31]], ptr align 16 [[TMP34]], i64 [[TMP0]], i1 false)
-; ORIGIN2-NEXT: [[TMP35:%.*]] = getelementptr i8, ptr [[TMP4]], i32 176
-; ORIGIN2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP33]], ptr align 16 [[TMP35]], i64 [[TMP0]], i1 false)
+; ORIGIN2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP20]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; ORIGIN2-NEXT: [[TMP36:%.*]] = xor i32 [[N]], -2147483648
; ORIGIN2-NEXT: [[TMP37:%.*]] = and i32 [[TMP36]], -1
; ORIGIN2-NEXT: [[TMP38:%.*]] = or i32 [[TMP36]], 0
@@ -385,9 +335,9 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN2-NEXT: [[TMP49:%.*]] = getelementptr i8, ptr [[REG_SAVE_AREA]], i64 [[TMP48]]
; ORIGIN2-NEXT: [[TMP50:%.*]] = add i32 [[GP_OFFSET]], 8
; ORIGIN2-NEXT: [[TMP51:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; ORIGIN2-NEXT: [[TMP52:%.*]] = xor i64 [[TMP51]], 87960930222080
+; ORIGIN2-NEXT: [[TMP52:%.*]] = and i64 [[TMP51]], -2147483649
; ORIGIN2-NEXT: [[TMP53:%.*]] = inttoptr i64 [[TMP52]] to ptr
-; ORIGIN2-NEXT: [[TMP54:%.*]] = add i64 [[TMP52]], 17592186044416
+; ORIGIN2-NEXT: [[TMP54:%.*]] = add i64 [[TMP52]], 1073741824
; ORIGIN2-NEXT: [[TMP55:%.*]] = inttoptr i64 [[TMP54]] to ptr
; ORIGIN2-NEXT: store i32 0, ptr [[TMP53]], align 16
; ORIGIN2-NEXT: store i32 [[TMP50]], ptr [[ARGS]], align 16
@@ -396,9 +346,9 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN2-NEXT: [[OVERFLOW_ARG_AREA:%.*]] = load ptr, ptr [[OVERFLOW_ARG_AREA_P]], align 8
; ORIGIN2-NEXT: [[OVERFLOW_ARG_AREA_NEXT:%.*]] = getelementptr i8, ptr [[OVERFLOW_ARG_AREA]], i64 8
; ORIGIN2-NEXT: [[TMP56:%.*]] = ptrtoint ptr [[OVERFLOW_ARG_AREA_P]] to i64
-; ORIGIN2-NEXT: [[TMP57:%.*]] = xor i64 [[TMP56]], 87960930222080
+; ORIGIN2-NEXT: [[TMP57:%.*]] = and i64 [[TMP56]], -2147483649
; ORIGIN2-NEXT: [[TMP58:%.*]] = inttoptr i64 [[TMP57]] to ptr
-; ORIGIN2-NEXT: [[TMP59:%.*]] = add i64 [[TMP57]], 17592186044416
+; ORIGIN2-NEXT: [[TMP59:%.*]] = add i64 [[TMP57]], 1073741824
; ORIGIN2-NEXT: [[TMP60:%.*]] = inttoptr i64 [[TMP59]] to ptr
; ORIGIN2-NEXT: store i64 0, ptr [[TMP58]], align 8
; ORIGIN2-NEXT: store ptr [[OVERFLOW_ARG_AREA_NEXT]], ptr [[OVERFLOW_ARG_AREA_P]], align 8
@@ -493,14 +443,15 @@ define dso_local i80 @test_i80(i80 %a, i80 %b, i80 %c) local_unnamed_addr {
; CHECK-LABEL: define dso_local i80 @test_i80(
; CHECK-SAME: i80 [[A:%.*]], i80 [[B:%.*]], i80 [[C:%.*]]) local_unnamed_addr {
; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8
-; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
-; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8
+; CHECK-NEXT: store i80 0, ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: store i64 48, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: store i80 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: [[CALL:%.*]] = tail call i80 (i32, ...) @sum_i80(i32 3, i80 [[A]], i80 [[B]], i80 [[C]])
@@ -511,20 +462,15 @@ define dso_local i80 @test_i80(i80 %a, i80 %b, i80 %c) local_unnamed_addr {
; ORIGIN-LABEL: define dso_local i80 @test_i80(
; ORIGIN-SAME: i80 [[A:%.*]], i80 [[B:%.*]], i80 [[C:%.*]]) local_unnamed_addr {
; ORIGIN-NEXT: [[ENTRY:.*:]]
+; ORIGIN-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN-NEXT: call void @llvm.donothing()
; ORIGIN-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8
-; ORIGIN-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 176) to ptr), align 8
-; ORIGIN-NEXT: store i32 0, ptr getelementptr (i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 176) to ptr), i32 2), align 8
-; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
-; ORIGIN-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 192) to ptr), align 8
-; ORIGIN-NEXT: store i32 0, ptr getelementptr (i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 192) to ptr), i32 2), align 8
-; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8
-; ORIGIN-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 208) to ptr), align 8
-; ORIGIN-NEXT: store i32 0, ptr getelementptr (i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 208) to ptr), i32 2), align 8
+; ORIGIN-NEXT: store i80 0, ptr @__msan_va_arg_tls, align 8
+; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; ORIGIN-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
; ORIGIN-NEXT: store i64 48, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN-NEXT: store i80 0, ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: [[CALL:%.*]] = tail call i80 (i32, ...) @sum_i80(i32 3, i80 [[A]], i80 [[B]], i80 [[C]])
@@ -537,20 +483,15 @@ define dso_local i80 @test_i80(i80 %a, i80 %b, i80 %c) local_unnamed_addr {
; ORIGIN2-LABEL: define dso_local i80 @test_i80(
; ORIGIN2-SAME: i80 [[A:%.*]], i80 [[B:%.*]], i80 [[C:%.*]]) local_unnamed_addr {
; ORIGIN2-NEXT: [[ENTRY:.*:]]
+; ORIGIN2-NEXT: [[TMP2:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN2-NEXT: call void @llvm.donothing()
; ORIGIN2-NEXT: store i32 0, ptr @__msan_param_tls, align 8
; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8
-; ORIGIN2-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 176) to ptr), align 8
-; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 176) to ptr), i32 2), align 8
-; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
-; ORIGIN2-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 192) to ptr), align 8
-; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 192) to ptr), i32 2), align 8
-; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 208) to ptr), align 8
-; ORIGIN2-NEXT: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 208) to ptr), align 8
-; ORIGIN2-NEXT: store i32 0, ptr getelementptr (i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 208) to ptr), i32 2), align 8
+; ORIGIN2-NEXT: store i80 0, ptr @__msan_va_arg_tls, align 8
+; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; ORIGIN2-NEXT: store i80 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
; ORIGIN2-NEXT: store i64 48, ptr @__msan_va_arg_overflow_size_tls, align 8
; ORIGIN2-NEXT: store i80 0, ptr @__msan_retval_tls, align 8
; ORIGIN2-NEXT: [[CALL:%.*]] = tail call i80 (i32, ...) @sum_i80(i32 3, i80 [[A]], i80 [[B]], i80 [[C]])
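The updated CHECK lines above encode the new i386 shadow mapping: the application address is masked with -2147483649 (0xFFFFFFFF7FFFFFFF as a sign-extended 64-bit immediate, i.e. bit 31 cleared) to obtain the shadow address, and the origin address is that shadow address plus 1073741824 (0x40000000), aligned down to 4 bytes where the IR adds an "and ..., -4". Below is a minimal standalone sketch of that computation, using only the constants visible in the tests; the address is hypothetical and this is an illustration of the mapping, not the pass code itself:

#include <cstdint>
#include <cstdio>

// Constants as they appear in the updated i386 CHECK lines.
constexpr uint64_t kAndMask    = 0xFFFFFFFF7FFFFFFFull; // "and i64 %p, -2147483649"
constexpr uint64_t kOriginBase = 0x40000000ull;         // "add i64 %s, 1073741824"

// Shadow address: clear bit 31 of the application address.
uint64_t shadowFor(uint64_t App) { return App & kAndMask; }

// Origin address: shadow plus OriginBase, aligned down to 4 bytes
// (the "and i64 %o, -4" step in the origin-tracking IR).
uint64_t originFor(uint64_t App) {
  return (shadowFor(App) + kOriginBase) & ~uint64_t{3};
}

int main() {
  const uint64_t Addr = 0xBFFF1234; // hypothetical 32-bit stack address
  std::printf("shadow = 0x%llx\norigin = 0x%llx\n",
              (unsigned long long)shadowFor(Addr),
              (unsigned long long)originFor(Addr));
  return 0;
}

For a 32-bit address the upper word is already zero, so the 64-bit mask reduces to the 32-bit "AndMask 0x80000000 cleared" scheme; this is why the same i64 arithmetic works in the instrumented IR even though the target triple is now i386-unknown-linux-gnu.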
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
index 205101564dfe09..aedefca2ef2b68 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
@@ -2,7 +2,7 @@
; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+target triple = "i386-unknown-linux-gnu"
%struct.Double4 = type { [4 x double] }
%struct.LongDouble2 = type { [2 x x86_fp80] }
@@ -17,15 +17,16 @@ define linkonce_odr dso_local void @_Z4testIcEvT_(i8 noundef signext %arg) sanit
; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIcEvT_(
; CHECK-SAME: i8 noundef signext [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG_ADDR:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -2147483649
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP3]], i8 -1, i64 1, i1 false)
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: store i8 [[TMP0]], ptr [[TMP6]], align 1
; CHECK-NEXT: store i8 [[ARG]], ptr [[ARG_ADDR]], align 1
@@ -33,7 +34,7 @@ define linkonce_odr dso_local void @_Z4testIcEvT_(i8 noundef signext %arg) sanit
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARG_ADDR]], align 1
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i8, ptr [[TMP10]], align 1
; CHECK-NEXT: [[_MSPROP:%.*]] = sext i8 [[_MSLD]] to i32
@@ -41,8 +42,8 @@ define linkonce_odr dso_local void @_Z4testIcEvT_(i8 noundef signext %arg) sanit
; CHECK-NEXT: store i8 [[_MSLD]], ptr @__msan_param_tls, align 8
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 [[_MSPROP]], ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i8, i32, ...) @_Z5test2IcEvT_iz(i8 noundef signext [[TMP7]], i32 noundef 1, i32 noundef [[CONV]])
; CHECK-NEXT: ret void
;
@@ -60,15 +61,16 @@ define linkonce_odr dso_local void @_Z4testIiEvT_(i32 noundef %arg) sanitize_mem
; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIiEvT_(
; CHECK-SAME: i32 noundef [[ARG:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -2147483649
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP3]], i8 -1, i64 4, i1 false)
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: store i32 [[TMP0]], ptr [[TMP6]], align 4
; CHECK-NEXT: store i32 [[ARG]], ptr [[ARG_ADDR]], align 4
@@ -76,14 +78,14 @@ define linkonce_odr dso_local void @_Z4testIiEvT_(i32 noundef %arg) sanitize_mem
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARG_ADDR]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4
; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i32, i32, ...) @_Z5test2IiEvT_iz(i32 noundef [[TMP7]], i32 noundef 1, i32 noundef [[TMP7]])
; CHECK-NEXT: ret void
;
@@ -100,15 +102,16 @@ define linkonce_odr dso_local void @_Z4testIfEvT_(float noundef %arg) sanitize_m
; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIfEvT_(
; CHECK-SAME: float noundef [[ARG:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG_ADDR:%.*]] = alloca float, align 4
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -2147483649
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[TMP3]], i8 -1, i64 4, i1 false)
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: store i32 [[TMP0]], ptr [[TMP6]], align 4
; CHECK-NEXT: store float [[ARG]], ptr [[ARG_ADDR]], align 4
@@ -116,7 +119,7 @@ define linkonce_odr dso_local void @_Z4testIfEvT_(float noundef %arg) sanitize_m
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARG_ADDR]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[_MSLD]] to i64
@@ -124,8 +127,8 @@ define linkonce_odr dso_local void @_Z4testIfEvT_(float noundef %arg) sanitize_m
; CHECK-NEXT: store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i64 [[TMP11]], ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (float, i32, ...) @_Z5test2IfEvT_iz(float noundef [[TMP7]], i32 noundef 1, double noundef [[CONV]])
; CHECK-NEXT: ret void
;
@@ -143,15 +146,16 @@ define linkonce_odr dso_local void @_Z4testIdEvT_(double noundef %arg) sanitize_
; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIdEvT_(
; CHECK-SAME: double noundef [[ARG:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG_ADDR:%.*]] = alloca double, align 8
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -2147483649
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 8, i1 false)
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: store i64 [[TMP0]], ptr [[TMP6]], align 8
; CHECK-NEXT: store double [[ARG]], ptr [[ARG_ADDR]], align 8
@@ -159,14 +163,14 @@ define linkonce_odr dso_local void @_Z4testIdEvT_(double noundef %arg) sanitize_
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
; CHECK-NEXT: [[TMP7:%.*]] = load double, ptr [[ARG_ADDR]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (double, i32, ...) @_Z5test2IdEvT_iz(double noundef [[TMP7]], i32 noundef 1, double noundef [[TMP7]])
; CHECK-NEXT: ret void
;
@@ -183,15 +187,16 @@ define linkonce_odr dso_local void @_Z4testIeEvT_(x86_fp80 noundef %arg) sanitiz
; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIeEvT_(
; CHECK-SAME: x86_fp80 noundef [[ARG:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP11:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i80, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG_ADDR:%.*]] = alloca x86_fp80, align 16
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -2147483649
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP3]], i8 -1, i64 16, i1 false)
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: store i80 [[TMP0]], ptr [[TMP6]], align 16
; CHECK-NEXT: store x86_fp80 [[ARG]], ptr [[ARG_ADDR]], align 16
@@ -199,13 +204,13 @@ define linkonce_odr dso_local void @_Z4testIeEvT_(x86_fp80 noundef %arg) sanitiz
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
; CHECK-NEXT: [[TMP7:%.*]] = load x86_fp80, ptr [[ARG_ADDR]], align 16
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
-; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i80, ptr [[TMP10]], align 16
; CHECK-NEXT: store i80 [[_MSLD]], ptr @__msan_param_tls, align 8
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: store i80 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i80 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), align 8
+; CHECK-NEXT: store i80 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (x86_fp80, i32, ...) @_Z5test2IeEvT_iz(x86_fp80 noundef [[TMP7]], i32 noundef 1, x86_fp80 noundef [[TMP7]])
; CHECK-NEXT: ret void
@@ -223,15 +228,16 @@ define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(i64 %arg.coerce) sanitiz
; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(
; CHECK-SAME: i64 [[ARG_COERCE:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_INTINT:%.*]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], -2147483649
; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 8, i1 false)
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: store i64 [[TMP0]], ptr [[TMP6]], align 8
; CHECK-NEXT: store i64 [[ARG_COERCE]], ptr [[ARG]], align 8
@@ -239,14 +245,14 @@ define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(i64 %arg.coerce) sanitiz
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG]])
; CHECK-NEXT: [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[ARG]], align 8
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP9]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 8, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i64, i32, ...) @_Z5test2I6IntIntEvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]])
; CHECK-NEXT: ret void
;
@@ -263,22 +269,23 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i
; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(
; CHECK-SAME: i64 [[ARG_COERCE0:%.*]], i64 [[ARG_COERCE1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_INT64INT64:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -2147483649
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP4]], i8 -1, i64 16, i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store i64 [[TMP0]], ptr [[TMP7]], align 8
; CHECK-NEXT: store i64 [[ARG_COERCE0]], ptr [[ARG]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[ARG]], i64 0, i32 1
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[TMP8]] to i64
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
+; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -2147483649
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: store i64 [[TMP1]], ptr [[TMP11]], align 8
; CHECK-NEXT: store i64 [[ARG_COERCE1]], ptr [[TMP8]], align 8
@@ -286,12 +293,12 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG]])
; CHECK-NEXT: [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[ARG]], align 8
; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080
+; CHECK-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], -2147483649
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP14]], align 8
; CHECK-NEXT: [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load i64, ptr [[TMP8]], align 8
; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP8]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
+; CHECK-NEXT: [[TMP16:%.*]] = and i64 [[TMP15]], -2147483649
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
; CHECK-NEXT: [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
@@ -299,9 +306,9 @@ define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (i64, i64, i32, ...) @_Z5test2I10Int64Int64EvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]])
; CHECK-NEXT: ret void
;
@@ -321,22 +328,23 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerc
; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(
; CHECK-SAME: double [[ARG_COERCE0:%.*]], double [[ARG_COERCE1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_DOUBLEDOUBLE:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -2147483649
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP4]], i8 -1, i64 16, i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store i64 [[TMP0]], ptr [[TMP7]], align 8
; CHECK-NEXT: store double [[ARG_COERCE0]], ptr [[ARG]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds { double, double }, ptr [[ARG]], i64 0, i32 1
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[TMP8]] to i64
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
+; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -2147483649
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: store i64 [[TMP1]], ptr [[TMP11]], align 8
; CHECK-NEXT: store double [[ARG_COERCE1]], ptr [[TMP8]], align 8
@@ -344,12 +352,12 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerc
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG]])
; CHECK-NEXT: [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load double, ptr [[ARG]], align 8
; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080
+; CHECK-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], -2147483649
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP14]], align 8
; CHECK-NEXT: [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load double, ptr [[TMP8]], align 8
; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP8]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
+; CHECK-NEXT: [[TMP16:%.*]] = and i64 [[TMP15]], -2147483649
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
; CHECK-NEXT: [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
@@ -357,9 +365,9 @@ define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerc
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (double, double, i32, ...) @_Z5test2I12DoubleDoubleEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]])
; CHECK-NEXT: ret void
;
@@ -380,25 +388,26 @@ define linkonce_odr dso_local void @_Z4testI7Double4EvT_(ptr noundef byval(%stru
; CHECK-SAME: ptr noundef byval([[STRUCT_DOUBLE4:%.*]]) align 8 [[ARG:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], -2147483649
; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_param_tls, i64 32, i1 false)
+; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG]])
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
+; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false)
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
+; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false)
-; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
-; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 32, i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP11]], -2147483649
+; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP13]], i64 32, i1 false)
; CHECK-NEXT: store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (ptr, i32, ...) @_Z5test2I7Double4EvT_iz(ptr noundef nonnull byval([[STRUCT_DOUBLE4]]) align 8 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_DOUBLE4]]) align 8 [[ARG]])
; CHECK-NEXT: ret void
@@ -415,22 +424,23 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce
; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(
; CHECK-SAME: double [[ARG_COERCE0:%.*]], float [[ARG_COERCE1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARG:%.*]] = alloca [[STRUCT_DOUBLEFLOAT:%.*]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], -2147483649
; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP4]], i8 -1, i64 16, i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: store i64 [[TMP0]], ptr [[TMP7]], align 8
; CHECK-NEXT: store double [[ARG_COERCE0]], ptr [[ARG]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds { double, float }, ptr [[ARG]], i64 0, i32 1
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[TMP8]] to i64
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
+; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -2147483649
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: store i32 [[TMP1]], ptr [[TMP11]], align 8
; CHECK-NEXT: store float [[ARG_COERCE1]], ptr [[TMP8]], align 8
@@ -438,12 +448,12 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG]])
; CHECK-NEXT: [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load double, ptr [[ARG]], align 8
; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080
+; CHECK-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], -2147483649
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP14]], align 8
; CHECK-NEXT: [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load float, ptr [[TMP8]], align 8
; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP8]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
+; CHECK-NEXT: [[TMP16:%.*]] = and i64 [[TMP15]], -2147483649
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
; CHECK-NEXT: [[_MSLD1:%.*]] = load i32, ptr [[TMP17]], align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
@@ -451,9 +461,9 @@ define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
-; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
-; CHECK-NEXT: store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT: store i64 [[_MSLD]], ptr @__msan_va_arg_tls, align 8
+; CHECK-NEXT: store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT: store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (double, float, i32, ...) @_Z5test2I11DoubleFloatEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]])
; CHECK-NEXT: ret void
;
@@ -474,25 +484,26 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble2EvT_(ptr noundef byval(
; CHECK-SAME: ptr noundef byval([[STRUCT_LONGDOUBLE2:%.*]]) align 16 [[ARG:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], -2147483649
; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_param_tls, i64 32, i1 false)
+; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG]])
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
+; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false)
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
+; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false)
-; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
-; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 32, i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP11]], -2147483649
+; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP13]], i64 32, i1 false)
; CHECK-NEXT: store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (ptr, i32, ...) @_Z5test2I11LongDouble2EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE2]]) align 16 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE2]]) align 16 [[ARG]])
; CHECK-NEXT: ret void
@@ -508,25 +519,26 @@ define linkonce_odr dso_local void @_Z4testI11LongDouble4EvT_(ptr noundef byval(
; CHECK-SAME: ptr noundef byval([[STRUCT_LONGDOUBLE4:%.*]]) align 16 [[ARG:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], -2147483649
; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_param_tls, i64 64, i1 false)
+; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG]])
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
+; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false)
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
+; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false)
-; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
-; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP11]], -2147483649
+; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP13]], i64 64, i1 false)
; CHECK-NEXT: store i64 64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (ptr, i32, ...) @_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]])
; CHECK-NEXT: ret void
@@ -543,8 +555,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IcEvT_iz(
; CHECK-SAME: i8 noundef signext [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -553,31 +564,21 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -606,8 +607,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz(
; CHECK-SAME: i32 noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -616,31 +616,21 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -661,8 +651,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IfEvT_iz(
; CHECK-SAME: float noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -671,31 +660,21 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -716,8 +695,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IdEvT_iz(
; CHECK-SAME: double noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -726,31 +704,21 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -771,8 +739,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IeEvT_iz(
; CHECK-SAME: x86_fp80 noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -781,31 +748,21 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -826,8 +783,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(
; CHECK-SAME: i64 [[T_COERCE:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -836,31 +792,21 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -881,8 +827,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(
; CHECK-SAME: i64 [[T_COERCE0:%.*]], i64 [[T_COERCE1:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -891,31 +836,21 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -936,8 +871,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(
; CHECK-SAME: double [[T_COERCE0:%.*]], double [[T_COERCE1:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -946,31 +880,21 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -991,8 +915,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(
; CHECK-SAME: ptr noundef byval([[STRUCT_DOUBLE4:%.*]]) align 8 [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -1001,31 +924,21 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -1046,8 +959,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(
; CHECK-SAME: double [[T_COERCE0:%.*]], float [[T_COERCE1:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -1056,31 +968,21 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -1101,8 +1003,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(
; CHECK-SAME: ptr noundef byval([[STRUCT_LONGDOUBLE2:%.*]]) align 16 [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -1111,31 +1012,21 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -1156,8 +1047,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(
; CHECK-SAME: ptr noundef byval([[STRUCT_LONGDOUBLE4:%.*]]) align 16 [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
@@ -1166,31 +1056,21 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], 16
-; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
-; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
-; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], 8
-; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
-; CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
-; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
-; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64
+; CHECK-NEXT: [[TMP14:%.*]] = and i64 [[TMP13]], -2147483649
+; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[TMP2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
@@ -1212,98 +1092,110 @@ define linkonce_odr dso_local void @_Z4test3I11LongDouble4EvT_(ptr noundef byval
; CHECK-SAME: ptr noundef byval([[STRUCT_LONGDOUBLE4:%.*]]) align 16 [[ARG:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], -2147483649
; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_param_tls, i64 64, i1 false)
+; CHECK-NEXT: [[TMP39:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARG]])
; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
+; CHECK-NEXT: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483649
; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false)
; CHECK-NEXT: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
+; CHECK-NEXT: [[TMP7:%.*]] = and i64 [[TMP6]], -2147483649
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false)
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
+; CHECK-NEXT: [[TMP10:%.*]] = and i64 [[TMP9]], -2147483649
; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), ptr align 8 [[TMP11]], i64 64, i1 false)
; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080
+; CHECK-NEXT: [[TMP13:%.*]] = and i64 [[TMP12]], -2147483649
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), ptr align 8 [[TMP14]], i64 64, i1 false)
; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
+; CHECK-NEXT: [[TMP16:%.*]] = and i64 [[TMP15]], -2147483649
; CHECK-NEXT: [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), ptr align 8 [[TMP17]], i64 64, i1 false)
; CHECK-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], 87960930222080
+; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP18]], -2147483649
; CHECK-NEXT: [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), ptr align 8 [[TMP20]], i64 64, i1 false)
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT: [[TMP22:%.*]] = and i64 [[TMP21]], -2147483649
; CHECK-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), ptr align 8 [[TMP23]], i64 64, i1 false)
; CHECK-NEXT: [[TMP24:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], 87960930222080
+; CHECK-NEXT: [[TMP25:%.*]] = and i64 [[TMP24]], -2147483649
; CHECK-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), ptr align 8 [[TMP26]], i64 64, i1 false)
; CHECK-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP28:%.*]] = xor i64 [[TMP27]], 87960930222080
+; CHECK-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], -2147483649
; CHECK-NEXT: [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), ptr align 8 [[TMP29]], i64 64, i1 false)
; CHECK-NEXT: [[TMP30:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP31:%.*]] = xor i64 [[TMP30]], 87960930222080
+; CHECK-NEXT: [[TMP31:%.*]] = and i64 [[TMP30]], -2147483649
; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), ptr align 8 [[TMP32]], i64 64, i1 false)
; CHECK-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP34:%.*]] = xor i64 [[TMP33]], 87960930222080
+; CHECK-NEXT: [[TMP34:%.*]] = and i64 [[TMP33]], -2147483649
; CHECK-NEXT: [[TMP35:%.*]] = inttoptr i64 [[TMP34]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), ptr align 8 [[TMP35]], i64 64, i1 false)
; CHECK-NEXT: [[TMP36:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP37:%.*]] = xor i64 [[TMP36]], 87960930222080
+; CHECK-NEXT: [[TMP37:%.*]] = and i64 [[TMP36]], -2147483649
; CHECK-NEXT: [[TMP38:%.*]] = inttoptr i64 [[TMP37]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), ptr align 8 [[TMP38]], i64 64, i1 false)
-; CHECK-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP40:%.*]] = xor i64 [[TMP39]], 87960930222080
-; CHECK-NEXT: [[TMP41:%.*]] = inttoptr i64 [[TMP40]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP41]], i64 64, i1 false)
-; CHECK-NEXT: [[TMP42:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP43:%.*]] = xor i64 [[TMP42]], 87960930222080
-; CHECK-NEXT: [[TMP44:%.*]] = inttoptr i64 [[TMP43]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), ptr align 8 [[TMP44]], i64 64, i1 false)
-; CHECK-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP46:%.*]] = xor i64 [[TMP45]], 87960930222080
-; CHECK-NEXT: [[TMP47:%.*]] = inttoptr i64 [[TMP46]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), ptr align 8 [[TMP47]], i64 64, i1 false)
-; CHECK-NEXT: [[TMP48:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP49:%.*]] = xor i64 [[TMP48]], 87960930222080
-; CHECK-NEXT: [[TMP50:%.*]] = inttoptr i64 [[TMP49]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), ptr align 8 [[TMP50]], i64 64, i1 false)
-; CHECK-NEXT: [[TMP51:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP52:%.*]] = xor i64 [[TMP51]], 87960930222080
-; CHECK-NEXT: [[TMP53:%.*]] = inttoptr i64 [[TMP52]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), ptr align 8 [[TMP53]], i64 64, i1 false)
-; CHECK-NEXT: [[TMP54:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP55:%.*]] = xor i64 [[TMP54]], 87960930222080
-; CHECK-NEXT: [[TMP56:%.*]] = inttoptr i64 [[TMP55]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), ptr align 8 [[TMP56]], i64 64, i1 false)
-; CHECK-NEXT: [[TMP57:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP58:%.*]] = xor i64 [[TMP57]], 87960930222080
-; CHECK-NEXT: [[TMP59:%.*]] = inttoptr i64 [[TMP58]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), ptr align 8 [[TMP59]], i64 64, i1 false)
-; CHECK-NEXT: [[TMP60:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP61:%.*]] = xor i64 [[TMP60]], 87960930222080
-; CHECK-NEXT: [[TMP62:%.*]] = inttoptr i64 [[TMP61]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), ptr align 8 [[TMP62]], i64 64, i1 false)
-; CHECK-NEXT: [[TMP63:%.*]] = ptrtoint ptr [[ARG]] to i64
-; CHECK-NEXT: [[TMP64:%.*]] = xor i64 [[TMP63]], 87960930222080
-; CHECK-NEXT: [[TMP65:%.*]] = inttoptr i64 [[TMP64]] to ptr
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), ptr align 8 [[TMP65]], i64 64, i1 false)
-; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 752) to ptr), i8 0, i32 48, i1 false)
+; CHECK-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP42:%.*]] = and i64 [[TMP41]], -2147483649
+; CHECK-NEXT: [[TMP43:%.*]] = inttoptr i64 [[TMP42]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 [[TMP43]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP45:%.*]] = and i64 [[TMP44]], -2147483649
+; CHECK-NEXT: [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), ptr align 8 [[TMP46]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP47:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP48:%.*]] = and i64 [[TMP47]], -2147483649
+; CHECK-NEXT: [[TMP49:%.*]] = inttoptr i64 [[TMP48]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 128) to ptr), ptr align 8 [[TMP49]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP50:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP51:%.*]] = and i64 [[TMP50]], -2147483649
+; CHECK-NEXT: [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), ptr align 8 [[TMP52]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP53:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP54:%.*]] = and i64 [[TMP53]], -2147483649
+; CHECK-NEXT: [[TMP55:%.*]] = inttoptr i64 [[TMP54]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), ptr align 8 [[TMP55]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP56:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP57:%.*]] = and i64 [[TMP56]], -2147483649
+; CHECK-NEXT: [[TMP58:%.*]] = inttoptr i64 [[TMP57]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), ptr align 8 [[TMP58]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP59:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP60:%.*]] = and i64 [[TMP59]], -2147483649
+; CHECK-NEXT: [[TMP61:%.*]] = inttoptr i64 [[TMP60]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), ptr align 8 [[TMP61]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP62:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP63:%.*]] = and i64 [[TMP62]], -2147483649
+; CHECK-NEXT: [[TMP64:%.*]] = inttoptr i64 [[TMP63]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), ptr align 8 [[TMP64]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP65:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP66:%.*]] = and i64 [[TMP65]], -2147483649
+; CHECK-NEXT: [[TMP67:%.*]] = inttoptr i64 [[TMP66]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), ptr align 8 [[TMP67]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP68:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP69:%.*]] = and i64 [[TMP68]], -2147483649
+; CHECK-NEXT: [[TMP70:%.*]] = inttoptr i64 [[TMP69]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), ptr align 8 [[TMP70]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP71:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP72:%.*]] = and i64 [[TMP71]], -2147483649
+; CHECK-NEXT: [[TMP73:%.*]] = inttoptr i64 [[TMP72]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), ptr align 8 [[TMP73]], i64 64, i1 false)
+; CHECK-NEXT: [[TMP74:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT: [[TMP75:%.*]] = and i64 [[TMP74]], -2147483649
+; CHECK-NEXT: [[TMP76:%.*]] = inttoptr i64 [[TMP75]] to ptr
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), ptr align 8 [[TMP76]], i64 64, i1 false)
; CHECK-NEXT: store i64 1280, ptr @__msan_va_arg_overflow_size_tls, align 8
; CHECK-NEXT: call void (ptr, i32, ...) @_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], i32 noundef 20, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]])
; CHECK-NEXT: ret void
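
Note on the recurring pattern in these hunks (an editorial sketch, not part of the patch): the removed CHECK lines encode the x86-64 SysV va_arg layout — a fixed 176-byte register save area copied ahead of the overflow area, and shadow addresses formed by XOR with 87960930222080 (0x500000000000) — while the added CHECK lines encode the 32-bit layout, where the shadow copy is sized entirely by @__msan_va_arg_overflow_size_tls and shadow addresses are formed by AND with -2147483649, i.e. by clearing bit 31. A minimal C++ sketch of the two address transforms as they appear in the emitted IR (helper names here are illustrative, not from the patch):

    #include <cstdint>

    // Old expectation (x86-64 Linux mapping): shadow = addr ^ 0x500000000000,
    // which is the 87960930222080 constant in the removed CHECK lines.
    static uint64_t ShadowAddrX86_64(uint64_t Addr) {
      return Addr ^ 0x500000000000ULL;
    }

    // New expectation (32-bit mapping): shadow = addr & 0xFFFFFFFF7FFFFFFF,
    // the same "and i64 %x, -2147483649" emitted throughout this diff;
    // the mask clears bit 31 of the application address.
    static uint64_t ShadowAddrI386(uint64_t Addr) {
      return Addr & 0xFFFFFFFF7FFFFFFFULL;
    }

The same retargeting explains the smaller unpoisoning memset (24 bytes down to 4, matching a pointer-sized va_list on i386 rather than the 24-byte x86-64 __va_list_tag) and, in the final hunk, the @__msan_va_arg_tls copies now starting at offset 0 instead of 176, since no register save area precedes the overflow arguments on 32-bit targets.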