[llvm] 37db332 - [test][msan] Precommit tests for vararg improvements (#72612)

via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 17 12:37:27 PST 2023


Author: Vitaly Buka
Date: 2023-11-17T12:37:20-08:00
New Revision: 37db332cf9aaf6047da9cc07ff544a4f91f49d24

URL: https://github.com/llvm/llvm-project/commit/37db332cf9aaf6047da9cc07ff544a4f91f49d24
DIFF: https://github.com/llvm/llvm-project/commit/37db332cf9aaf6047da9cc07ff544a4f91f49d24.diff

LOG: [test][msan] Precommit tests for vararg improvements (#72612)

Added: 
    compiler-rt/test/msan/vararg_shadow.cpp
    llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
    llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/compiler-rt/test/msan/vararg_shadow.cpp b/compiler-rt/test/msan/vararg_shadow.cpp
new file mode 100644
index 000000000000000..0c1e5e8d6369c3a
--- /dev/null
+++ b/compiler-rt/test/msan/vararg_shadow.cpp
@@ -0,0 +1,254 @@
+// Check that shadow of retrieved value from va_list matches the shadow of passed value.
+
+// Without -fno-sanitize-memory-param-retval we can't even pass poisoned values.
+// RUN: %clangxx_msan -fno-sanitize-memory-param-retval -fsanitize-memory-track-origins=0 -O3 %s -o %t
+
+// Nothing works yet.
+// XFAIL: *
+
+#include <sanitizer/msan_interface.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#ifdef DEBUG_VARARG_SHADOW_TEST
+__attribute__((noinline, no_sanitize("memory"))) void
+printb(const void *p, size_t n, int line, int align) {
+  fprintf(stderr, "\n%p at line %d: \n", p, line);
+  for (int i = 0; i < n;) {
+    fprintf(stderr, "%p: ", (void *)(((uint8_t *)p) + i));
+    for (int j = 0; j < align; ++i, ++j)
+      fprintf(stderr, "%02x ", ((uint8_t *)p)[i]);
+    fprintf(stderr, "\n");
+  }
+}
+
+struct my_va_list {
+#  ifdef __ARM_ARCH_ISA_A64
+  void *stack;
+  void *gr_top;
+  void *vr_top;
+  int gr_offs;
+  int vr_offs;
+#  else
+  unsigned int gp_offset;
+  unsigned int fp_offset;
+  void *overflow_arg_area;
+  void *reg_save_area;
+#  endif
+};
+
+__attribute__((noinline, no_sanitize("memory"))) void printva(const void *p,
+                                                              int line) {
+  my_va_list *pp = (my_va_list *)p;
+#  ifdef __ARM_ARCH_ISA_A64
+  fprintf(stderr,
+          "\nva %p at line %d: stack : %p\n gr_top: %p\n vr_top: %p\n gr_offs: "
+          "%d\n "
+          "vr_offs: %d\n",
+          p, line, pp->stack, pp->gr_top, pp->vr_top, pp->gr_offs, pp->vr_offs);
+
+  printb((char *)pp->gr_top + pp->gr_offs, -pp->gr_offs, __LINE__, 8);
+  printb((char *)pp->vr_top + pp->vr_offs, -pp->vr_offs, __LINE__, 16);
+  printb((char *)pp->stack, 256, __LINE__, 8);
+#  else
+  fprintf(stderr,
+          "\nva %p at line %d:\n gp_offset: %u\n fp_offset: %u\n "
+          "overflow_arg_area: %p\n reg_save_area: %p\n\n",
+          p, line, pp->gp_offset, pp->fp_offset, pp->overflow_arg_area,
+          pp->reg_save_area);
+
+  printb((char *)pp->reg_save_area + pp->gp_offset,
+         pp->fp_offset - pp->gp_offset, __LINE__, 8);
+  printb((char *)pp->reg_save_area + pp->fp_offset, 128, __LINE__, 16);
+  printb((char *)pp->overflow_arg_area, 256, __LINE__, 8);
+#  endif
+}
+
+__attribute__((noinline, no_sanitize("memory"))) void printtls(int line) {
+  uint8_t tmp[kMsanParamTlsSize];
+  for (int i = 0; i < kMsanParamTlsSize; ++i)
+    tmp[i] = __msan_va_arg_tls[i];
+  fprintf(stderr, "\nTLS at line %d: ", line);
+  for (int i = 0; i < kMsanParamTlsSize;) {
+    fprintf(stderr, "\n");
+    for (int j = 0; j < 16; ++i, ++j)
+      fprintf(stderr, "%02x ", tmp[i]);
+  }
+
+  fprintf(stderr, "\n");
+}
+#endif // DEBUG_VARARG_SHADOW_TEST
+
+const int kMsanParamTlsSize = 800;
+extern "C" __thread uint8_t __msan_va_arg_tls[];
+
+struct IntInt {
+  int a;
+  int b;
+};
+
+struct Int64Int64 {
+  int64_t a;
+  int64_t b;
+};
+
+struct DoubleDouble {
+  double a;
+  double b;
+};
+
+struct Double4 {
+  double a[4];
+};
+
+struct DoubleFloat {
+  double a;
+  float b;
+};
+
+struct LongDouble2 {
+  long double a[2];
+};
+
+struct LongDouble4 {
+  long double a[4];
+};
+
+template <class T>
+__attribute__((noinline)) void print_shadow(va_list &args, int n,
+                                            const char *function) {
+  for (int i = 0; i < n; i++) {
+    // 1-based to make it different from clean shadow.
+    fprintf(stderr, "\nArgShadow fn:%s n:%d i:%02x ", function, n, i + 1);
+    T arg_int = va_arg(args, T);
+    if (__msan_test_shadow(&arg_int, sizeof(arg_int)))
+      fprintf(stderr, "fake[clean] %02x", i + 1);
+    else
+      __msan_dump_shadow(&arg_int, sizeof(arg_int));
+#ifdef DEBUG_VARARG_SHADOW_TEST
+    printb(&arg_int, sizeof(arg_int), __LINE__, 16);
+#endif
+  }
+}
+
+template <class T> __attribute__((noinline)) void test1(int n, ...) {
+#ifdef DEBUG_VARARG_SHADOW_TEST
+  printtls(__LINE__);
+#endif
+  va_list args;
+  va_start(args, n);
+#ifdef DEBUG_VARARG_SHADOW_TEST
+  printva(&args, __LINE__);
+#endif
+  print_shadow<T>(args, n, __FUNCTION__);
+  va_end(args);
+}
+
+template <class T> __attribute__((noinline)) void test2(T t, int n, ...) {
+#ifdef DEBUG_VARARG_SHADOW_TEST
+  printtls(__LINE__);
+#endif
+  va_list args;
+  va_start(args, n);
+#ifdef DEBUG_VARARG_SHADOW_TEST
+  printva(&args, __LINE__);
+#endif
+  print_shadow<T>(args, n, __FUNCTION__);
+  va_end(args);
+}
+
+template <class T> __attribute__((noinline)) void test() {
+  // Array of values we will pass into variadic functions.
+  static T args[32] = {};
+
+  // Poison values making the first byte of the item shadow match the index.
+  // E.g. item 3 should be poisoned as '03 ff ff ff'.
+  memset(args, 0xff, sizeof(args));
+  __msan_poison(args, sizeof(args));
+  for (int i = 0; i < 32; ++i) {
+    char *first = (char *)(&args[i]);
+    *first = char(*(int *)(first)&i);
+  }
+#ifdef DEBUG_VARARG_SHADOW_TEST
+  __msan_print_shadow(args, sizeof(args));
+#endif
+
+  // Now we will check that index, printed like 'i:03' will match
+  // '0x123abc[0x123abc] 03 ff ff ff'
+  memset(__msan_va_arg_tls, 0xee, kMsanParamTlsSize);
+  test1<T>(1, args[1]);
+  // CHECK-COUNT-1: ArgShadow fn:test1 n:1 i:[[ARGI:[[:xdigit:]]{2}]] {{[^]]+}}] [[ARGI]]
+
+  memset(__msan_va_arg_tls, 0xee, kMsanParamTlsSize);
+  test1<T>(4, args[1], args[2], args[3], args[4]);
+  // CHECK-COUNT-4: ArgShadow fn:test1 n:4 i:[[ARGI:[[:xdigit:]]{2}]] {{[^]]+}}] [[ARGI]]
+
+  memset(__msan_va_arg_tls, 0xee, kMsanParamTlsSize);
+  test1<T>(20, args[1], args[2], args[3], args[4], args[5], args[6], args[7],
+           args[8], args[9], args[10], args[11], args[12], args[13], args[14],
+           args[15], args[16], args[17], args[18], args[19], args[20]);
+  // CHECK-COUNT-20: ArgShadow fn:test1 n:20 i:[[ARGI:[[:xdigit:]]{2}]] {{[^]]+}}] [[ARGI]]
+
+  memset(__msan_va_arg_tls, 0xee, kMsanParamTlsSize);
+  test2<T>(args[31], 1, args[1]);
+  // CHECK-COUNT-1: ArgShadow fn:test2 n:1 i:[[ARGI:[[:xdigit:]]{2}]] {{[^]]+}}] [[ARGI]]
+
+  memset(__msan_va_arg_tls, 0xee, kMsanParamTlsSize);
+  test2<T>(args[31], 4, args[1], args[2], args[3], args[4]);
+  // CHECK-COUNT-4: ArgShadow fn:test2 n:4 i:[[ARGI:[[:xdigit:]]{2}]] {{[^]]+}}] [[ARGI]]
+
+  memset(__msan_va_arg_tls, 0xee, kMsanParamTlsSize);
+  test2<T>(args[31], 20, args[1], args[2], args[3], args[4], args[5], args[6],
+           args[7], args[8], args[9], args[10], args[11], args[12], args[13],
+           args[14], args[15], args[16], args[17], args[18], args[19],
+           args[20]);
+  // CHECK-COUNT-20: ArgShadow fn:test2 n:20 i:[[ARGI:[[:xdigit:]]{2}]] {{[^]]+}}] [[ARGI]]
+}
+
+int main(int argc, char *argv[]) {
+#define TEST(T...)                                                             \
+  if (argc == 2 && strcmp(argv[1], #T) == 0) {                                 \
+    test<T>();                                                                 \
+    return 0;                                                                  \
+  }
+
+  TEST(char);
+  // RUN: %run %t char 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  TEST(int);
+  // RUN: %run %t int 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  TEST(float);
+  // RUN: %run %t float 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  TEST(double);
+  // RUN: %run %t double 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  TEST(long double);
+  // RUN: %run %t "long double" 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  TEST(IntInt);
+  // RUN: %run %t IntInt 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  TEST(Int64Int64);
+  // RUN: %run %t Int64Int64 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  TEST(DoubleDouble);
+  // RUN: %run %t DoubleDouble 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  TEST(Double4);
+  // RUN: %run %t Double4 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  TEST(DoubleFloat);
+  // RUN: %run %t DoubleFloat 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  TEST(LongDouble2);
+  // RUN: %run %t LongDouble2 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  TEST(LongDouble4);
+  // RUN: %run %t LongDouble4 2>&1 | FileCheck %s --implicit-check-not="ArgShadow" --check-prefixes=CHECK
+
+  return 1;
+}

diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
new file mode 100644
index 000000000000000..66f4d61f444f34e
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
@@ -0,0 +1,1890 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-grtev4-linux-gnu"
+
+%struct.IntInt = type { i32, i32 }
+%struct.Int64Int64 = type { i64, i64 }
+%struct.DoubleDouble = type { double, double }
+%struct.Double4 = type { [4 x double] }
+%struct.DoubleFloat = type { double, float }
+%struct.LongDouble2 = type { [2 x fp128] }
+%struct.LongDouble4 = type { [4 x fp128] }
+%"struct.std::__va_list" = type { ptr, ptr, ptr, i32, i32 }
+
+define linkonce_odr dso_local void @_Z4testIcEvT_(i8 noundef %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIcEvT_(
+; CHECK-SAME: i8 noundef [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG_ADDR:%.*]] = alloca i8, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP3]], i8 -1, i64 1, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i8 [[TMP0]], ptr [[TMP6]], align 4
+; CHECK-NEXT:    store i8 [[ARG]], ptr [[ARG_ADDR]], align 4
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
+; CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ARG_ADDR]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i8, ptr [[TMP10]], align 4
+; CHECK-NEXT:    [[_MSPROP:%.*]] = zext i8 [[_MSLD]] to i32
+; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[TMP7]] to i32
+; CHECK-NEXT:    store i8 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (i8, i32, ...) @_Z5test2IcEvT_iz(i8 noundef [[TMP7]], i32 noundef 1, i32 noundef [[CONV]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg.addr = alloca i8, align 4
+  store i8 %arg, ptr %arg.addr, align 4
+  call void @_Z3usePv(ptr noundef nonnull %arg.addr)
+  %0 = load i8, ptr %arg.addr, align 4
+  %conv = zext i8 %0 to i32
+  call void (i8, i32, ...) @_Z5test2IcEvT_iz(i8 noundef %0, i32 noundef 1, i32 noundef %conv)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testIiEvT_(i32 noundef %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIiEvT_(
+; CHECK-SAME: i32 noundef [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP3]], i8 -1, i64 4, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i32 [[TMP0]], ptr [[TMP6]], align 4
+; CHECK-NEXT:    store i32 [[ARG]], ptr [[ARG_ADDR]], align 4
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[ARG_ADDR]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (i32, i32, ...) @_Z5test2IiEvT_iz(i32 noundef [[TMP7]], i32 noundef 1, i32 noundef [[TMP7]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg.addr = alloca i32, align 4
+  store i32 %arg, ptr %arg.addr, align 4
+  call void @_Z3usePv(ptr noundef nonnull %arg.addr)
+  %0 = load i32, ptr %arg.addr, align 4
+  call void (i32, i32, ...) @_Z5test2IiEvT_iz(i32 noundef %0, i32 noundef 1, i32 noundef %0)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testIfEvT_(float noundef %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIfEvT_(
+; CHECK-SAME: float noundef [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG_ADDR:%.*]] = alloca float, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP3]], i8 -1, i64 4, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i32 [[TMP0]], ptr [[TMP6]], align 4
+; CHECK-NEXT:    store float [[ARG]], ptr [[ARG_ADDR]], align 4
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
+; CHECK-NEXT:    [[TMP7:%.*]] = load float, ptr [[ARG_ADDR]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[_MSLD]] to i64
+; CHECK-NEXT:    [[CONV:%.*]] = fpext float [[TMP7]] to double
+; CHECK-NEXT:    store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (float, i32, ...) @_Z5test2IfEvT_iz(float noundef [[TMP7]], i32 noundef 1, double noundef [[CONV]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg.addr = alloca float, align 4
+  store float %arg, ptr %arg.addr, align 4
+  call void @_Z3usePv(ptr noundef nonnull %arg.addr)
+  %0 = load float, ptr %arg.addr, align 4
+  %conv = fpext float %0 to double
+  call void (float, i32, ...) @_Z5test2IfEvT_iz(float noundef %0, i32 noundef 1, double noundef %conv)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testIdEvT_(double noundef %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIdEvT_(
+; CHECK-SAME: double noundef [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG_ADDR:%.*]] = alloca double, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 8, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i64 [[TMP0]], ptr [[TMP6]], align 8
+; CHECK-NEXT:    store double [[ARG]], ptr [[ARG_ADDR]], align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
+; CHECK-NEXT:    [[TMP7:%.*]] = load double, ptr [[ARG_ADDR]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (double, i32, ...) @_Z5test2IdEvT_iz(double noundef [[TMP7]], i32 noundef 1, double noundef [[TMP7]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg.addr = alloca double, align 8
+  store double %arg, ptr %arg.addr, align 8
+  call void @_Z3usePv(ptr noundef nonnull %arg.addr)
+  %0 = load double, ptr %arg.addr, align 8
+  call void (double, i32, ...) @_Z5test2IdEvT_iz(double noundef %0, i32 noundef 1, double noundef %0)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testIeEvT_(fp128 noundef %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIeEvT_(
+; CHECK-SAME: fp128 noundef [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG_ADDR:%.*]] = alloca fp128, align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP3]], i8 -1, i64 16, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i128 [[TMP0]], ptr [[TMP6]], align 16
+; CHECK-NEXT:    store fp128 [[ARG]], ptr [[ARG_ADDR]], align 16
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
+; CHECK-NEXT:    [[TMP7:%.*]] = load fp128, ptr [[ARG_ADDR]], align 16
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i128, ptr [[TMP10]], align 16
+; CHECK-NEXT:    store i128 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i128 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    store i128 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (fp128, i32, ...) @_Z5test2IeEvT_iz(fp128 noundef [[TMP7]], i32 noundef 1, fp128 noundef [[TMP7]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg.addr = alloca fp128, align 16
+  store fp128 %arg, ptr %arg.addr, align 16
+  call void @_Z3usePv(ptr noundef nonnull %arg.addr)
+  %0 = load fp128, ptr %arg.addr, align 16
+  call void (fp128, i32, ...) @_Z5test2IeEvT_iz(fp128 noundef %0, i32 noundef 1, fp128 noundef %0)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(i64 %arg.coerce) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(
+; CHECK-SAME: i64 [[ARG_COERCE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_INTINT:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 8, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i64 [[TMP0]], ptr [[TMP6]], align 8
+; CHECK-NEXT:    store i64 [[ARG_COERCE]], ptr [[ARG]], align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP9]], align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (i64, i32, ...) @_Z5test2I6IntIntEvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg = alloca %struct.IntInt, align 8
+  store i64 %arg.coerce, ptr %arg, align 8
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load i64, ptr %arg, align 8
+  call void (i64, i32, ...) @_Z5test2I6IntIntEvT_iz(i64 %agg.tmp.sroa.0.0.copyload, i32 noundef 1, i64 %agg.tmp.sroa.0.0.copyload)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_([2 x i64] %arg.coerce) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(
+; CHECK-SAME: [2 x i64] [[ARG_COERCE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load [2 x i64], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_INT64INT64:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 16, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue [2 x i64] [[TMP0]], 0
+; CHECK-NEXT:    [[ARG_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[ARG_COERCE]], 0
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i64 [[TMP4]], ptr [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[ARG_COERCE_FCA_0_EXTRACT]], ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = extractvalue [2 x i64] [[TMP0]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[ARG_COERCE]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[ARG]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store i64 [[TMP8]], ptr [[TMP11]], align 8
+; CHECK-NEXT:    store i64 [[ARG_COERCE_FCA_1_EXTRACT]], ptr [[ARG_COERCE_FCA_1_GEP]], align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP14]], align 8
+; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load i64, ptr [[ARG_COERCE_FCA_1_GEP]], align 8
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 193514046488576
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP18:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[_MSLD]], 0
+; CHECK-NEXT:    [[DOTFCA_0_INSERT2:%.*]] = insertvalue [2 x i64] poison, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = insertvalue [2 x i64] [[TMP18]], i64 [[_MSLD1]], 1
+; CHECK-NEXT:    [[DOTFCA_1_INSERT3:%.*]] = insertvalue [2 x i64] [[DOTFCA_0_INSERT2]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], 1
+; CHECK-NEXT:    store [2 x i64] [[TMP19]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT:    store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void ([2 x i64], i32, ...) @_Z5test2I10Int64Int64EvT_iz([2 x i64] [[DOTFCA_1_INSERT3]], i32 noundef 1, [2 x i64] [[DOTFCA_1_INSERT3]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg = alloca %struct.Int64Int64, align 8
+  %arg.coerce.fca.0.extract = extractvalue [2 x i64] %arg.coerce, 0
+  store i64 %arg.coerce.fca.0.extract, ptr %arg, align 8
+  %arg.coerce.fca.1.extract = extractvalue [2 x i64] %arg.coerce, 1
+  %arg.coerce.fca.1.gep = getelementptr inbounds [2 x i64], ptr %arg, i64 0, i64 1
+  store i64 %arg.coerce.fca.1.extract, ptr %arg.coerce.fca.1.gep, align 8
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load i64, ptr %arg, align 8
+  %agg.tmp.sroa.2.0.copyload = load i64, ptr %arg.coerce.fca.1.gep, align 8
+  %.fca.0.insert2 = insertvalue [2 x i64] poison, i64 %agg.tmp.sroa.0.0.copyload, 0
+  %.fca.1.insert3 = insertvalue [2 x i64] %.fca.0.insert2, i64 %agg.tmp.sroa.2.0.copyload, 1
+  call void ([2 x i64], i32, ...) @_Z5test2I10Int64Int64EvT_iz([2 x i64] %.fca.1.insert3, i32 noundef 1, [2 x i64] %.fca.1.insert3)
+  ret void
+}
+
+; test<DoubleDouble>: the struct is coerced per the ABI to [2 x double]. The
+; function spills it into a local alloca, escapes it via use(), reloads both
+; elements, and forwards the rebuilt aggregate to variadic _Z5test2 both as
+; the fixed argument and again as a vararg.
+; The CHECK lines record MSan's current behavior: incoming shadow is read from
+; @__msan_param_tls, stored alongside each element into the alloca's shadow
+; (addr xor 193514046488576), reloaded, and written to @__msan_param_tls and
+; @__msan_va_arg_tls (vararg overflow area offset 192) before the call.
+; NOTE(review): the CHECK body looks auto-generated (update_test_checks.py
+; style) -- regenerate rather than hand-edit when instrumentation changes.
+define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_([2 x double] alignstack(8) %arg.coerce) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(
+; CHECK-SAME: [2 x double] alignstack(8) [[ARG_COERCE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load [2 x i64], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_DOUBLEDOUBLE:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 16, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue [2 x i64] [[TMP0]], 0
+; CHECK-NEXT:    [[ARG_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x double] [[ARG_COERCE]], 0
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i64 [[TMP4]], ptr [[TMP7]], align 8
+; CHECK-NEXT:    store double [[ARG_COERCE_FCA_0_EXTRACT]], ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = extractvalue [2 x i64] [[TMP0]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x double] [[ARG_COERCE]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_GEP:%.*]] = getelementptr inbounds [2 x double], ptr [[ARG]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store i64 [[TMP8]], ptr [[TMP11]], align 8
+; CHECK-NEXT:    store double [[ARG_COERCE_FCA_1_EXTRACT]], ptr [[ARG_COERCE_FCA_1_GEP]], align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load double, ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP14]], align 8
+; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load double, ptr [[ARG_COERCE_FCA_1_GEP]], align 8
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 193514046488576
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP18:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[_MSLD]], 0
+; CHECK-NEXT:    [[DOTFCA_0_INSERT2:%.*]] = insertvalue [2 x double] poison, double [[AGG_TMP_SROA_0_0_COPYLOAD]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = insertvalue [2 x i64] [[TMP18]], i64 [[_MSLD1]], 1
+; CHECK-NEXT:    [[DOTFCA_1_INSERT3:%.*]] = insertvalue [2 x double] [[DOTFCA_0_INSERT2]], double [[AGG_TMP_SROA_2_0_COPYLOAD]], 1
+; CHECK-NEXT:    store [2 x i64] [[TMP19]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT:    store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void ([2 x double], i32, ...) @_Z5test2I12DoubleDoubleEvT_iz([2 x double] alignstack(8) [[DOTFCA_1_INSERT3]], i32 noundef 1, [2 x double] alignstack(8) [[DOTFCA_1_INSERT3]])
+; CHECK-NEXT:    ret void
+;
+; Uninstrumented input IR (what MSan instruments to produce the above):
+entry:
+  %arg = alloca %struct.DoubleDouble, align 8
+  %arg.coerce.fca.0.extract = extractvalue [2 x double] %arg.coerce, 0
+  store double %arg.coerce.fca.0.extract, ptr %arg, align 8
+  %arg.coerce.fca.1.extract = extractvalue [2 x double] %arg.coerce, 1
+  %arg.coerce.fca.1.gep = getelementptr inbounds [2 x double], ptr %arg, i64 0, i64 1
+  store double %arg.coerce.fca.1.extract, ptr %arg.coerce.fca.1.gep, align 8
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load double, ptr %arg, align 8
+  %agg.tmp.sroa.2.0.copyload = load double, ptr %arg.coerce.fca.1.gep, align 8
+  %.fca.0.insert2 = insertvalue [2 x double] poison, double %agg.tmp.sroa.0.0.copyload, 0
+  %.fca.1.insert3 = insertvalue [2 x double] %.fca.0.insert2, double %agg.tmp.sroa.2.0.copyload, 1
+  call void ([2 x double], i32, ...) @_Z5test2I12DoubleDoubleEvT_iz([2 x double] alignstack(8) %.fca.1.insert3, i32 noundef 1, [2 x double] alignstack(8) %.fca.1.insert3)
+  ret void
+}
+
+; test<Double4>: same pattern as the DoubleDouble case but with a 4-element
+; [4 x double] coercion (32 bytes of shadow). The aggregate is spilled to a
+; local alloca, escaped via use(), reloaded element-by-element, and passed to
+; variadic _Z5test2 as both the fixed argument and a vararg.
+; CHECK lines pin the current per-element shadow store/load sequence and the
+; outgoing TLS stores (param shadow at offsets 0/40, vararg shadow at 192).
+; NOTE(review): CHECK body appears auto-generated -- regenerate, don't edit.
+define linkonce_odr dso_local void @_Z4testI7Double4EvT_([4 x double] alignstack(8) %arg.coerce) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI7Double4EvT_(
+; CHECK-SAME: [4 x double] alignstack(8) [[ARG_COERCE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load [4 x i64], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_DOUBLE4:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue [4 x i64] [[TMP0]], 0
+; CHECK-NEXT:    [[ARG_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x double] [[ARG_COERCE]], 0
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i64 [[TMP4]], ptr [[TMP7]], align 8
+; CHECK-NEXT:    store double [[ARG_COERCE_FCA_0_EXTRACT]], ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = extractvalue [4 x i64] [[TMP0]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x double] [[ARG_COERCE]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_GEP:%.*]] = getelementptr inbounds [4 x double], ptr [[ARG]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store i64 [[TMP8]], ptr [[TMP11]], align 8
+; CHECK-NEXT:    store double [[ARG_COERCE_FCA_1_EXTRACT]], ptr [[ARG_COERCE_FCA_1_GEP]], align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = extractvalue [4 x i64] [[TMP0]], 2
+; CHECK-NEXT:    [[ARG_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x double] [[ARG_COERCE]], 2
+; CHECK-NEXT:    [[ARG_COERCE_FCA_2_GEP:%.*]] = getelementptr inbounds [4 x double], ptr [[ARG]], i64 0, i64 2
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_2_GEP]] to i64
+; CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store i64 [[TMP12]], ptr [[TMP15]], align 8
+; CHECK-NEXT:    store double [[ARG_COERCE_FCA_2_EXTRACT]], ptr [[ARG_COERCE_FCA_2_GEP]], align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = extractvalue [4 x i64] [[TMP0]], 3
+; CHECK-NEXT:    [[ARG_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x double] [[ARG_COERCE]], 3
+; CHECK-NEXT:    [[ARG_COERCE_FCA_3_GEP:%.*]] = getelementptr inbounds [4 x double], ptr [[ARG]], i64 0, i64 3
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_3_GEP]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = xor i64 [[TMP17]], 193514046488576
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    store i64 [[TMP16]], ptr [[TMP19]], align 8
+; CHECK-NEXT:    store double [[ARG_COERCE_FCA_3_EXTRACT]], ptr [[ARG_COERCE_FCA_3_GEP]], align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load double, ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP20:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = xor i64 [[TMP20]], 193514046488576
+; CHECK-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP22]], align 8
+; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load double, ptr [[ARG_COERCE_FCA_1_GEP]], align 8
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = xor i64 [[TMP23]], 193514046488576
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[_MSLD1:%.*]] = load i64, ptr [[TMP25]], align 8
+; CHECK-NEXT:    [[AGG_TMP_SROA_3_0_COPYLOAD:%.*]] = load double, ptr [[ARG_COERCE_FCA_2_GEP]], align 8
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_2_GEP]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = xor i64 [[TMP26]], 193514046488576
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[_MSLD2:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[AGG_TMP_SROA_4_0_COPYLOAD:%.*]] = load double, ptr [[ARG_COERCE_FCA_3_GEP]], align 8
+; CHECK-NEXT:    [[TMP29:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_3_GEP]] to i64
+; CHECK-NEXT:    [[TMP30:%.*]] = xor i64 [[TMP29]], 193514046488576
+; CHECK-NEXT:    [[TMP31:%.*]] = inttoptr i64 [[TMP30]] to ptr
+; CHECK-NEXT:    [[_MSLD3:%.*]] = load i64, ptr [[TMP31]], align 8
+; CHECK-NEXT:    [[TMP32:%.*]] = insertvalue [4 x i64] [i64 -1, i64 -1, i64 -1, i64 -1], i64 [[_MSLD]], 0
+; CHECK-NEXT:    [[DOTFCA_0_INSERT4:%.*]] = insertvalue [4 x double] poison, double [[AGG_TMP_SROA_0_0_COPYLOAD]], 0
+; CHECK-NEXT:    [[TMP33:%.*]] = insertvalue [4 x i64] [[TMP32]], i64 [[_MSLD1]], 1
+; CHECK-NEXT:    [[DOTFCA_1_INSERT5:%.*]] = insertvalue [4 x double] [[DOTFCA_0_INSERT4]], double [[AGG_TMP_SROA_2_0_COPYLOAD]], 1
+; CHECK-NEXT:    [[TMP34:%.*]] = insertvalue [4 x i64] [[TMP33]], i64 [[_MSLD2]], 2
+; CHECK-NEXT:    [[DOTFCA_2_INSERT6:%.*]] = insertvalue [4 x double] [[DOTFCA_1_INSERT5]], double [[AGG_TMP_SROA_3_0_COPYLOAD]], 2
+; CHECK-NEXT:    [[TMP35:%.*]] = insertvalue [4 x i64] [[TMP34]], i64 [[_MSLD3]], 3
+; CHECK-NEXT:    [[DOTFCA_3_INSERT7:%.*]] = insertvalue [4 x double] [[DOTFCA_2_INSERT6]], double [[AGG_TMP_SROA_4_0_COPYLOAD]], 3
+; CHECK-NEXT:    store [4 x i64] [[TMP35]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    store [4 x i64] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT:    store [4 x i64] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT:    store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void ([4 x double], i32, ...) @_Z5test2I7Double4EvT_iz([4 x double] alignstack(8) [[DOTFCA_3_INSERT7]], i32 noundef 1, [4 x double] alignstack(8) [[DOTFCA_3_INSERT7]])
+; CHECK-NEXT:    ret void
+;
+; Uninstrumented input IR:
+entry:
+  %arg = alloca %struct.Double4, align 8
+  %arg.coerce.fca.0.extract = extractvalue [4 x double] %arg.coerce, 0
+  store double %arg.coerce.fca.0.extract, ptr %arg, align 8
+  %arg.coerce.fca.1.extract = extractvalue [4 x double] %arg.coerce, 1
+  %arg.coerce.fca.1.gep = getelementptr inbounds [4 x double], ptr %arg, i64 0, i64 1
+  store double %arg.coerce.fca.1.extract, ptr %arg.coerce.fca.1.gep, align 8
+  %arg.coerce.fca.2.extract = extractvalue [4 x double] %arg.coerce, 2
+  %arg.coerce.fca.2.gep = getelementptr inbounds [4 x double], ptr %arg, i64 0, i64 2
+  store double %arg.coerce.fca.2.extract, ptr %arg.coerce.fca.2.gep, align 8
+  %arg.coerce.fca.3.extract = extractvalue [4 x double] %arg.coerce, 3
+  %arg.coerce.fca.3.gep = getelementptr inbounds [4 x double], ptr %arg, i64 0, i64 3
+  store double %arg.coerce.fca.3.extract, ptr %arg.coerce.fca.3.gep, align 8
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load double, ptr %arg, align 8
+  %agg.tmp.sroa.2.0.copyload = load double, ptr %arg.coerce.fca.1.gep, align 8
+  %agg.tmp.sroa.3.0.copyload = load double, ptr %arg.coerce.fca.2.gep, align 8
+  %agg.tmp.sroa.4.0.copyload = load double, ptr %arg.coerce.fca.3.gep, align 8
+  %.fca.0.insert4 = insertvalue [4 x double] poison, double %agg.tmp.sroa.0.0.copyload, 0
+  %.fca.1.insert5 = insertvalue [4 x double] %.fca.0.insert4, double %agg.tmp.sroa.2.0.copyload, 1
+  %.fca.2.insert6 = insertvalue [4 x double] %.fca.1.insert5, double %agg.tmp.sroa.3.0.copyload, 2
+  %.fca.3.insert7 = insertvalue [4 x double] %.fca.2.insert6, double %agg.tmp.sroa.4.0.copyload, 3
+  call void ([4 x double], i32, ...) @_Z5test2I7Double4EvT_iz([4 x double] alignstack(8) %.fca.3.insert7, i32 noundef 1, [4 x double] alignstack(8) %.fca.3.insert7)
+  ret void
+}
+
+; test<DoubleFloat>: the struct is coerced per the ABI to [2 x i64] (no
+; alignstack, unlike the homogeneous-FP cases above). Same flow: spill to a
+; local alloca, escape via use(), reload, and forward to variadic _Z5test2 as
+; both the fixed argument and a vararg. CHECK lines pin the element-wise
+; shadow copies and the outgoing __msan_param_tls/__msan_va_arg_tls stores.
+; NOTE(review): CHECK body appears auto-generated -- regenerate, don't edit.
+define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_([2 x i64] %arg.coerce) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(
+; CHECK-SAME: [2 x i64] [[ARG_COERCE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load [2 x i64], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_DOUBLEFLOAT:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 16, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue [2 x i64] [[TMP0]], 0
+; CHECK-NEXT:    [[ARG_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x i64] [[ARG_COERCE]], 0
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i64 [[TMP4]], ptr [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[ARG_COERCE_FCA_0_EXTRACT]], ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = extractvalue [2 x i64] [[TMP0]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x i64] [[ARG_COERCE]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_GEP:%.*]] = getelementptr inbounds [2 x i64], ptr [[ARG]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store i64 [[TMP8]], ptr [[TMP11]], align 8
+; CHECK-NEXT:    store i64 [[ARG_COERCE_FCA_1_EXTRACT]], ptr [[ARG_COERCE_FCA_1_GEP]], align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP14]], align 8
+; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load i64, ptr [[ARG_COERCE_FCA_1_GEP]], align 8
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 193514046488576
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP18:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[_MSLD]], 0
+; CHECK-NEXT:    [[DOTFCA_0_INSERT2:%.*]] = insertvalue [2 x i64] poison, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = insertvalue [2 x i64] [[TMP18]], i64 [[_MSLD1]], 1
+; CHECK-NEXT:    [[DOTFCA_1_INSERT3:%.*]] = insertvalue [2 x i64] [[DOTFCA_0_INSERT2]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], 1
+; CHECK-NEXT:    store [2 x i64] [[TMP19]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    store [2 x i64] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT:    store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void ([2 x i64], i32, ...) @_Z5test2I11DoubleFloatEvT_iz([2 x i64] [[DOTFCA_1_INSERT3]], i32 noundef 1, [2 x i64] [[DOTFCA_1_INSERT3]])
+; CHECK-NEXT:    ret void
+;
+; Uninstrumented input IR:
+entry:
+  %arg = alloca %struct.DoubleFloat, align 8
+  %arg.coerce.fca.0.extract = extractvalue [2 x i64] %arg.coerce, 0
+  store i64 %arg.coerce.fca.0.extract, ptr %arg, align 8
+  %arg.coerce.fca.1.extract = extractvalue [2 x i64] %arg.coerce, 1
+  %arg.coerce.fca.1.gep = getelementptr inbounds [2 x i64], ptr %arg, i64 0, i64 1
+  store i64 %arg.coerce.fca.1.extract, ptr %arg.coerce.fca.1.gep, align 8
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load i64, ptr %arg, align 8
+  %agg.tmp.sroa.2.0.copyload = load i64, ptr %arg.coerce.fca.1.gep, align 8
+  %.fca.0.insert2 = insertvalue [2 x i64] poison, i64 %agg.tmp.sroa.0.0.copyload, 0
+  %.fca.1.insert3 = insertvalue [2 x i64] %.fca.0.insert2, i64 %agg.tmp.sroa.2.0.copyload, 1
+  call void ([2 x i64], i32, ...) @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %.fca.1.insert3, i32 noundef 1, [2 x i64] %.fca.1.insert3)
+  ret void
+}
+
+; test<LongDouble2>: [2 x fp128] with alignstack(16); shadow elements are
+; i128 and the alloca/shadow accesses are 16-byte aligned. Same flow as the
+; other test<T> functions: spill to alloca, escape via use(), reload, and
+; forward to variadic _Z5test2 as both fixed argument and vararg. CHECK lines
+; pin the i128 shadow traffic and the param/va_arg TLS stores (offsets 32/40
+; and 192 respectively).
+; NOTE(review): CHECK body appears auto-generated -- regenerate, don't edit.
+define linkonce_odr dso_local void @_Z4testI11LongDouble2EvT_([2 x fp128] alignstack(16) %arg.coerce) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI11LongDouble2EvT_(
+; CHECK-SAME: [2 x fp128] alignstack(16) [[ARG_COERCE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load [2 x i128], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_LONGDOUBLE2:%.*]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP3]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue [2 x i128] [[TMP0]], 0
+; CHECK-NEXT:    [[ARG_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [2 x fp128] [[ARG_COERCE]], 0
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i128 [[TMP4]], ptr [[TMP7]], align 16
+; CHECK-NEXT:    store fp128 [[ARG_COERCE_FCA_0_EXTRACT]], ptr [[ARG]], align 16
+; CHECK-NEXT:    [[TMP8:%.*]] = extractvalue [2 x i128] [[TMP0]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [2 x fp128] [[ARG_COERCE]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_GEP:%.*]] = getelementptr inbounds [2 x fp128], ptr [[ARG]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store i128 [[TMP8]], ptr [[TMP11]], align 16
+; CHECK-NEXT:    store fp128 [[ARG_COERCE_FCA_1_EXTRACT]], ptr [[ARG_COERCE_FCA_1_GEP]], align 16
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load fp128, ptr [[ARG]], align 16
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i128, ptr [[TMP14]], align 16
+; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load fp128, ptr [[ARG_COERCE_FCA_1_GEP]], align 16
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 193514046488576
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[_MSLD1:%.*]] = load i128, ptr [[TMP17]], align 16
+; CHECK-NEXT:    [[TMP18:%.*]] = insertvalue [2 x i128] [i128 -1, i128 -1], i128 [[_MSLD]], 0
+; CHECK-NEXT:    [[DOTFCA_0_INSERT4:%.*]] = insertvalue [2 x fp128] poison, fp128 [[AGG_TMP_SROA_0_0_COPYLOAD]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = insertvalue [2 x i128] [[TMP18]], i128 [[_MSLD1]], 1
+; CHECK-NEXT:    [[DOTFCA_1_INSERT5:%.*]] = insertvalue [2 x fp128] [[DOTFCA_0_INSERT4]], fp128 [[AGG_TMP_SROA_2_0_COPYLOAD]], 1
+; CHECK-NEXT:    store [2 x i128] [[TMP19]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    store [2 x i128] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT:    store [2 x i128] [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT:    store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void ([2 x fp128], i32, ...) @_Z5test2I11LongDouble2EvT_iz([2 x fp128] alignstack(16) [[DOTFCA_1_INSERT5]], i32 noundef 1, [2 x fp128] alignstack(16) [[DOTFCA_1_INSERT5]])
+; CHECK-NEXT:    ret void
+;
+; Uninstrumented input IR:
+entry:
+  %arg = alloca %struct.LongDouble2, align 16
+  %arg.coerce.fca.0.extract = extractvalue [2 x fp128] %arg.coerce, 0
+  store fp128 %arg.coerce.fca.0.extract, ptr %arg, align 16
+  %arg.coerce.fca.1.extract = extractvalue [2 x fp128] %arg.coerce, 1
+  %arg.coerce.fca.1.gep = getelementptr inbounds [2 x fp128], ptr %arg, i64 0, i64 1
+  store fp128 %arg.coerce.fca.1.extract, ptr %arg.coerce.fca.1.gep, align 16
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load fp128, ptr %arg, align 16
+  %agg.tmp.sroa.2.0.copyload = load fp128, ptr %arg.coerce.fca.1.gep, align 16
+  %.fca.0.insert4 = insertvalue [2 x fp128] poison, fp128 %agg.tmp.sroa.0.0.copyload, 0
+  %.fca.1.insert5 = insertvalue [2 x fp128] %.fca.0.insert4, fp128 %agg.tmp.sroa.2.0.copyload, 1
+  call void ([2 x fp128], i32, ...) @_Z5test2I11LongDouble2EvT_iz([2 x fp128] alignstack(16) %.fca.1.insert5, i32 noundef 1, [2 x fp128] alignstack(16) %.fca.1.insert5)
+  ret void
+}
+
+; test<LongDouble4>: largest case, [4 x fp128] alignstack(16) -- 64 bytes of
+; i128 shadow. Same flow as the other test<T> functions: spill to alloca,
+; escape via use(), reload per element, forward to variadic _Z5test2 as both
+; fixed argument and vararg. CHECK lines pin the 4x i128 shadow copy sequence
+; and the outgoing TLS stores (param offsets 64/72, va_arg offset 192,
+; overflow size 64).
+; NOTE(review): CHECK body appears auto-generated -- regenerate, don't edit.
+define linkonce_odr dso_local void @_Z4testI11LongDouble4EvT_([4 x fp128] alignstack(16) %arg.coerce) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI11LongDouble4EvT_(
+; CHECK-SAME: [4 x fp128] alignstack(16) [[ARG_COERCE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load [4 x i128], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_LONGDOUBLE4:%.*]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP3]], i8 -1, i64 64, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue [4 x i128] [[TMP0]], 0
+; CHECK-NEXT:    [[ARG_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x fp128] [[ARG_COERCE]], 0
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i128 [[TMP4]], ptr [[TMP7]], align 16
+; CHECK-NEXT:    store fp128 [[ARG_COERCE_FCA_0_EXTRACT]], ptr [[ARG]], align 16
+; CHECK-NEXT:    [[TMP8:%.*]] = extractvalue [4 x i128] [[TMP0]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x fp128] [[ARG_COERCE]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_GEP:%.*]] = getelementptr inbounds [4 x fp128], ptr [[ARG]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store i128 [[TMP8]], ptr [[TMP11]], align 16
+; CHECK-NEXT:    store fp128 [[ARG_COERCE_FCA_1_EXTRACT]], ptr [[ARG_COERCE_FCA_1_GEP]], align 16
+; CHECK-NEXT:    [[TMP12:%.*]] = extractvalue [4 x i128] [[TMP0]], 2
+; CHECK-NEXT:    [[ARG_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x fp128] [[ARG_COERCE]], 2
+; CHECK-NEXT:    [[ARG_COERCE_FCA_2_GEP:%.*]] = getelementptr inbounds [4 x fp128], ptr [[ARG]], i64 0, i64 2
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_2_GEP]] to i64
+; CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store i128 [[TMP12]], ptr [[TMP15]], align 16
+; CHECK-NEXT:    store fp128 [[ARG_COERCE_FCA_2_EXTRACT]], ptr [[ARG_COERCE_FCA_2_GEP]], align 16
+; CHECK-NEXT:    [[TMP16:%.*]] = extractvalue [4 x i128] [[TMP0]], 3
+; CHECK-NEXT:    [[ARG_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x fp128] [[ARG_COERCE]], 3
+; CHECK-NEXT:    [[ARG_COERCE_FCA_3_GEP:%.*]] = getelementptr inbounds [4 x fp128], ptr [[ARG]], i64 0, i64 3
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_3_GEP]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = xor i64 [[TMP17]], 193514046488576
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    store i128 [[TMP16]], ptr [[TMP19]], align 16
+; CHECK-NEXT:    store fp128 [[ARG_COERCE_FCA_3_EXTRACT]], ptr [[ARG_COERCE_FCA_3_GEP]], align 16
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load fp128, ptr [[ARG]], align 16
+; CHECK-NEXT:    [[TMP20:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = xor i64 [[TMP20]], 193514046488576
+; CHECK-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i128, ptr [[TMP22]], align 16
+; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load fp128, ptr [[ARG_COERCE_FCA_1_GEP]], align 16
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = xor i64 [[TMP23]], 193514046488576
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[_MSLD1:%.*]] = load i128, ptr [[TMP25]], align 16
+; CHECK-NEXT:    [[AGG_TMP_SROA_3_0_COPYLOAD:%.*]] = load fp128, ptr [[ARG_COERCE_FCA_2_GEP]], align 16
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_2_GEP]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = xor i64 [[TMP26]], 193514046488576
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[_MSLD2:%.*]] = load i128, ptr [[TMP28]], align 16
+; CHECK-NEXT:    [[AGG_TMP_SROA_4_0_COPYLOAD:%.*]] = load fp128, ptr [[ARG_COERCE_FCA_3_GEP]], align 16
+; CHECK-NEXT:    [[TMP29:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_3_GEP]] to i64
+; CHECK-NEXT:    [[TMP30:%.*]] = xor i64 [[TMP29]], 193514046488576
+; CHECK-NEXT:    [[TMP31:%.*]] = inttoptr i64 [[TMP30]] to ptr
+; CHECK-NEXT:    [[_MSLD3:%.*]] = load i128, ptr [[TMP31]], align 16
+; CHECK-NEXT:    [[TMP32:%.*]] = insertvalue [4 x i128] [i128 -1, i128 -1, i128 -1, i128 -1], i128 [[_MSLD]], 0
+; CHECK-NEXT:    [[DOTFCA_0_INSERT4:%.*]] = insertvalue [4 x fp128] poison, fp128 [[AGG_TMP_SROA_0_0_COPYLOAD]], 0
+; CHECK-NEXT:    [[TMP33:%.*]] = insertvalue [4 x i128] [[TMP32]], i128 [[_MSLD1]], 1
+; CHECK-NEXT:    [[DOTFCA_1_INSERT5:%.*]] = insertvalue [4 x fp128] [[DOTFCA_0_INSERT4]], fp128 [[AGG_TMP_SROA_2_0_COPYLOAD]], 1
+; CHECK-NEXT:    [[TMP34:%.*]] = insertvalue [4 x i128] [[TMP33]], i128 [[_MSLD2]], 2
+; CHECK-NEXT:    [[DOTFCA_2_INSERT6:%.*]] = insertvalue [4 x fp128] [[DOTFCA_1_INSERT5]], fp128 [[AGG_TMP_SROA_3_0_COPYLOAD]], 2
+; CHECK-NEXT:    [[TMP35:%.*]] = insertvalue [4 x i128] [[TMP34]], i128 [[_MSLD3]], 3
+; CHECK-NEXT:    [[DOTFCA_3_INSERT7:%.*]] = insertvalue [4 x fp128] [[DOTFCA_2_INSERT6]], fp128 [[AGG_TMP_SROA_4_0_COPYLOAD]], 3
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT:    store i64 64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void ([4 x fp128], i32, ...) @_Z5test2I11LongDouble4EvT_iz([4 x fp128] alignstack(16) [[DOTFCA_3_INSERT7]], i32 noundef 1, [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT7]])
+; CHECK-NEXT:    ret void
+;
+; Uninstrumented input IR:
+entry:
+  %arg = alloca %struct.LongDouble4, align 16
+  %arg.coerce.fca.0.extract = extractvalue [4 x fp128] %arg.coerce, 0
+  store fp128 %arg.coerce.fca.0.extract, ptr %arg, align 16
+  %arg.coerce.fca.1.extract = extractvalue [4 x fp128] %arg.coerce, 1
+  %arg.coerce.fca.1.gep = getelementptr inbounds [4 x fp128], ptr %arg, i64 0, i64 1
+  store fp128 %arg.coerce.fca.1.extract, ptr %arg.coerce.fca.1.gep, align 16
+  %arg.coerce.fca.2.extract = extractvalue [4 x fp128] %arg.coerce, 2
+  %arg.coerce.fca.2.gep = getelementptr inbounds [4 x fp128], ptr %arg, i64 0, i64 2
+  store fp128 %arg.coerce.fca.2.extract, ptr %arg.coerce.fca.2.gep, align 16
+  %arg.coerce.fca.3.extract = extractvalue [4 x fp128] %arg.coerce, 3
+  %arg.coerce.fca.3.gep = getelementptr inbounds [4 x fp128], ptr %arg, i64 0, i64 3
+  store fp128 %arg.coerce.fca.3.extract, ptr %arg.coerce.fca.3.gep, align 16
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load fp128, ptr %arg, align 16
+  %agg.tmp.sroa.2.0.copyload = load fp128, ptr %arg.coerce.fca.1.gep, align 16
+  %agg.tmp.sroa.3.0.copyload = load fp128, ptr %arg.coerce.fca.2.gep, align 16
+  %agg.tmp.sroa.4.0.copyload = load fp128, ptr %arg.coerce.fca.3.gep, align 16
+  %.fca.0.insert4 = insertvalue [4 x fp128] poison, fp128 %agg.tmp.sroa.0.0.copyload, 0
+  %.fca.1.insert5 = insertvalue [4 x fp128] %.fca.0.insert4, fp128 %agg.tmp.sroa.2.0.copyload, 1
+  %.fca.2.insert6 = insertvalue [4 x fp128] %.fca.1.insert5, fp128 %agg.tmp.sroa.3.0.copyload, 2
+  %.fca.3.insert7 = insertvalue [4 x fp128] %.fca.2.insert6, fp128 %agg.tmp.sroa.4.0.copyload, 3
+  call void ([4 x fp128], i32, ...) @_Z5test2I11LongDouble4EvT_iz([4 x fp128] alignstack(16) %.fca.3.insert7, i32 noundef 1, [4 x fp128] alignstack(16) %.fca.3.insert7)
+  ret void
+}
+
+declare void @_Z3usePv(ptr noundef) local_unnamed_addr #2
+
+define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IcEvT_iz(
+; CHECK-SAME: i8 noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3
+
+declare void @llvm.va_start(ptr) #4
+
+declare void @llvm.va_end(ptr) #4
+
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3
+
+define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz(
+; CHECK-SAME: i32 noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IfEvT_iz(
+; CHECK-SAME: float noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IdEvT_iz(
+; CHECK-SAME: double noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IeEvT_iz(
+; CHECK-SAME: fp128 noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(
+; CHECK-SAME: i64 [[T_COERCE:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coerce, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(
+; CHECK-SAME: [2 x i64] [[T_COERCE:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] alignstack(8) %t.coerce, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(
+; CHECK-SAME: [2 x double] alignstack(8) [[T_COERCE:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignstack(8) %t.coerce, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(
+; CHECK-SAME: [4 x double] alignstack(8) [[T_COERCE:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.coerce, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(
+; CHECK-SAME: [2 x i64] [[T_COERCE:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] alignstack(16) %t.coerce, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(
+; CHECK-SAME: [2 x fp128] alignstack(16) [[T_COERCE:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] alignstack(16) %t.coerce, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(
+; CHECK-SAME: [4 x fp128] alignstack(16) [[T_COERCE:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 192, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP6]], i8 -1, i64 32, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 0
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP15]], 8
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[TMP19]], 24
+; CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = sext i32 [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[TMP18]], [[TMP23]]
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP26]], 16
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr [[TMP28]], align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP30]], 28
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = sext i32 [[TMP33]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[TMP29]], [[TMP34]]
+; CHECK-NEXT:    [[TMP36:%.*]] = inttoptr i64 [[TMP35]] to ptr
+; CHECK-NEXT:    [[TMP37:%.*]] = add i64 64, [[TMP23]]
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 193514046488576
+; CHECK-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP37]]
+; CHECK-NEXT:    [[TMP42:%.*]] = sub i64 64, [[TMP37]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP40]], ptr align 8 [[TMP41]], i64 [[TMP42]], i1 false)
+; CHECK-NEXT:    [[TMP43:%.*]] = add i64 128, [[TMP34]]
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 193514046488576
+; CHECK-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 64
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 [[TMP43]]
+; CHECK-NEXT:    [[TMP49:%.*]] = sub i64 128, [[TMP43]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP46]], ptr align 8 [[TMP48]], i64 [[TMP49]], i1 false)
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP51:%.*]] = xor i64 [[TMP50]], 193514046488576
+; CHECK-NEXT:    [[TMP52:%.*]] = inttoptr i64 [[TMP51]] to ptr
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 192
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP52]], ptr align 16 [[TMP53]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca %"struct.std::__va_list", align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4test2I11LongDouble4EvT_([4 x fp128] alignstack(16) %arg.coerce) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4test2I11LongDouble4EvT_(
+; CHECK-SAME: [4 x fp128] alignstack(16) [[ARG_COERCE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load [4 x i128], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_LONGDOUBLE4:%.*]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 193514046488576
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP3]], i8 -1, i64 64, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = extractvalue [4 x i128] [[TMP0]], 0
+; CHECK-NEXT:    [[ARG_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x fp128] [[ARG_COERCE]], 0
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 193514046488576
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i128 [[TMP4]], ptr [[TMP7]], align 16
+; CHECK-NEXT:    store fp128 [[ARG_COERCE_FCA_0_EXTRACT]], ptr [[ARG]], align 16
+; CHECK-NEXT:    [[TMP8:%.*]] = extractvalue [4 x i128] [[TMP0]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x fp128] [[ARG_COERCE]], 1
+; CHECK-NEXT:    [[ARG_COERCE_FCA_1_GEP:%.*]] = getelementptr inbounds [4 x fp128], ptr [[ARG]], i64 0, i64 1
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store i128 [[TMP8]], ptr [[TMP11]], align 16
+; CHECK-NEXT:    store fp128 [[ARG_COERCE_FCA_1_EXTRACT]], ptr [[ARG_COERCE_FCA_1_GEP]], align 16
+; CHECK-NEXT:    [[TMP12:%.*]] = extractvalue [4 x i128] [[TMP0]], 2
+; CHECK-NEXT:    [[ARG_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x fp128] [[ARG_COERCE]], 2
+; CHECK-NEXT:    [[ARG_COERCE_FCA_2_GEP:%.*]] = getelementptr inbounds [4 x fp128], ptr [[ARG]], i64 0, i64 2
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_2_GEP]] to i64
+; CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store i128 [[TMP12]], ptr [[TMP15]], align 16
+; CHECK-NEXT:    store fp128 [[ARG_COERCE_FCA_2_EXTRACT]], ptr [[ARG_COERCE_FCA_2_GEP]], align 16
+; CHECK-NEXT:    [[TMP16:%.*]] = extractvalue [4 x i128] [[TMP0]], 3
+; CHECK-NEXT:    [[ARG_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x fp128] [[ARG_COERCE]], 3
+; CHECK-NEXT:    [[ARG_COERCE_FCA_3_GEP:%.*]] = getelementptr inbounds [4 x fp128], ptr [[ARG]], i64 0, i64 3
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_3_GEP]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = xor i64 [[TMP17]], 193514046488576
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    store i128 [[TMP16]], ptr [[TMP19]], align 16
+; CHECK-NEXT:    store fp128 [[ARG_COERCE_FCA_3_EXTRACT]], ptr [[ARG_COERCE_FCA_3_GEP]], align 16
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load fp128, ptr [[ARG]], align 16
+; CHECK-NEXT:    [[TMP20:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = xor i64 [[TMP20]], 193514046488576
+; CHECK-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i128, ptr [[TMP22]], align 16
+; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load fp128, ptr [[ARG_COERCE_FCA_1_GEP]], align 16
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_1_GEP]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = xor i64 [[TMP23]], 193514046488576
+; CHECK-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; CHECK-NEXT:    [[_MSLD1:%.*]] = load i128, ptr [[TMP25]], align 16
+; CHECK-NEXT:    [[AGG_TMP_SROA_3_0_COPYLOAD:%.*]] = load fp128, ptr [[ARG_COERCE_FCA_2_GEP]], align 16
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_2_GEP]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = xor i64 [[TMP26]], 193514046488576
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
+; CHECK-NEXT:    [[_MSLD2:%.*]] = load i128, ptr [[TMP28]], align 16
+; CHECK-NEXT:    [[AGG_TMP_SROA_4_0_COPYLOAD:%.*]] = load fp128, ptr [[ARG_COERCE_FCA_3_GEP]], align 16
+; CHECK-NEXT:    [[TMP29:%.*]] = ptrtoint ptr [[ARG_COERCE_FCA_3_GEP]] to i64
+; CHECK-NEXT:    [[TMP30:%.*]] = xor i64 [[TMP29]], 193514046488576
+; CHECK-NEXT:    [[TMP31:%.*]] = inttoptr i64 [[TMP30]] to ptr
+; CHECK-NEXT:    [[_MSLD3:%.*]] = load i128, ptr [[TMP31]], align 16
+; CHECK-NEXT:    [[TMP32:%.*]] = insertvalue [4 x i128] [i128 -1, i128 -1, i128 -1, i128 -1], i128 [[_MSLD]], 0
+; CHECK-NEXT:    [[DOTFCA_0_INSERT118:%.*]] = insertvalue [4 x fp128] poison, fp128 [[AGG_TMP_SROA_0_0_COPYLOAD]], 0
+; CHECK-NEXT:    [[TMP33:%.*]] = insertvalue [4 x i128] [[TMP32]], i128 [[_MSLD1]], 1
+; CHECK-NEXT:    [[DOTFCA_1_INSERT119:%.*]] = insertvalue [4 x fp128] [[DOTFCA_0_INSERT118]], fp128 [[AGG_TMP_SROA_2_0_COPYLOAD]], 1
+; CHECK-NEXT:    [[TMP34:%.*]] = insertvalue [4 x i128] [[TMP33]], i128 [[_MSLD2]], 2
+; CHECK-NEXT:    [[DOTFCA_2_INSERT120:%.*]] = insertvalue [4 x fp128] [[DOTFCA_1_INSERT119]], fp128 [[AGG_TMP_SROA_3_0_COPYLOAD]], 2
+; CHECK-NEXT:    [[TMP35:%.*]] = insertvalue [4 x i128] [[TMP34]], i128 [[_MSLD3]], 3
+; CHECK-NEXT:    [[DOTFCA_3_INSERT121:%.*]] = insertvalue [4 x fp128] [[DOTFCA_2_INSERT120]], fp128 [[AGG_TMP_SROA_4_0_COPYLOAD]], 3
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 192) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 256) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 320) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 384) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 448) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 512) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 576) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 640) to ptr), align 8
+; CHECK-NEXT:    store [4 x i128] [[TMP35]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 704) to ptr), align 8
+; CHECK-NEXT:    store i64 1280, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void ([4 x fp128], i32, ...) @_Z5test2I11LongDouble4EvT_iz([4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], i32 noundef 20, [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]], [4 x fp128] alignstack(16) [[DOTFCA_3_INSERT121]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg = alloca %struct.LongDouble4, align 16
+  %arg.coerce.fca.0.extract = extractvalue [4 x fp128] %arg.coerce, 0
+  store fp128 %arg.coerce.fca.0.extract, ptr %arg, align 16
+  %arg.coerce.fca.1.extract = extractvalue [4 x fp128] %arg.coerce, 1
+  %arg.coerce.fca.1.gep = getelementptr inbounds [4 x fp128], ptr %arg, i64 0, i64 1
+  store fp128 %arg.coerce.fca.1.extract, ptr %arg.coerce.fca.1.gep, align 16
+  %arg.coerce.fca.2.extract = extractvalue [4 x fp128] %arg.coerce, 2
+  %arg.coerce.fca.2.gep = getelementptr inbounds [4 x fp128], ptr %arg, i64 0, i64 2
+  store fp128 %arg.coerce.fca.2.extract, ptr %arg.coerce.fca.2.gep, align 16
+  %arg.coerce.fca.3.extract = extractvalue [4 x fp128] %arg.coerce, 3
+  %arg.coerce.fca.3.gep = getelementptr inbounds [4 x fp128], ptr %arg, i64 0, i64 3
+  store fp128 %arg.coerce.fca.3.extract, ptr %arg.coerce.fca.3.gep, align 16
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load fp128, ptr %arg, align 16
+  %agg.tmp.sroa.2.0.copyload = load fp128, ptr %arg.coerce.fca.1.gep, align 16
+  %agg.tmp.sroa.3.0.copyload = load fp128, ptr %arg.coerce.fca.2.gep, align 16
+  %agg.tmp.sroa.4.0.copyload = load fp128, ptr %arg.coerce.fca.3.gep, align 16
+  %.fca.0.insert118 = insertvalue [4 x fp128] poison, fp128 %agg.tmp.sroa.0.0.copyload, 0
+  %.fca.1.insert119 = insertvalue [4 x fp128] %.fca.0.insert118, fp128 %agg.tmp.sroa.2.0.copyload, 1
+  %.fca.2.insert120 = insertvalue [4 x fp128] %.fca.1.insert119, fp128 %agg.tmp.sroa.3.0.copyload, 2
+  %.fca.3.insert121 = insertvalue [4 x fp128] %.fca.2.insert120, fp128 %agg.tmp.sroa.4.0.copyload, 3
+  call void ([4 x fp128], i32, ...) @_Z5test2I11LongDouble4EvT_iz([4 x fp128] alignstack(16) %.fca.3.insert121, i32 noundef 20, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121, [4 x fp128] alignstack(16) %.fca.3.insert121)
+  ret void
+}

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
new file mode 100644
index 000000000000000..f20f7e6a59587e7
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
@@ -0,0 +1,1314 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.Double4 = type { [4 x double] }
+%struct.LongDouble2 = type { [2 x x86_fp80] }
+%struct.LongDouble4 = type { [4 x x86_fp80] }
+%struct.IntInt = type { i32, i32 }
+%struct.Int64Int64 = type { i64, i64 }
+%struct.DoubleDouble = type { double, double }
+%struct.DoubleFloat = type { double, float }
+%struct.__va_list_tag = type { i32, i32, ptr, ptr }
+
+define linkonce_odr dso_local void @_Z4testIcEvT_(i8 noundef signext %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIcEvT_(
+; CHECK-SAME: i8 noundef signext [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG_ADDR:%.*]] = alloca i8, align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[TMP3]], i8 -1, i64 1, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i8 [[TMP0]], ptr [[TMP6]], align 1
+; CHECK-NEXT:    store i8 [[ARG]], ptr [[ARG_ADDR]], align 1
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
+; CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ARG_ADDR]], align 1
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i8, ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[_MSPROP:%.*]] = sext i8 [[_MSLD]] to i32
+; CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[TMP7]] to i32
+; CHECK-NEXT:    store i8 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i32 [[_MSPROP]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (i8, i32, ...) @_Z5test2IcEvT_iz(i8 noundef signext [[TMP7]], i32 noundef 1, i32 noundef [[CONV]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg.addr = alloca i8, align 1
+  store i8 %arg, ptr %arg.addr, align 1
+  call void @_Z3usePv(ptr noundef nonnull %arg.addr)
+  %0 = load i8, ptr %arg.addr, align 1
+  %conv = sext i8 %0 to i32
+  call void (i8, i32, ...) @_Z5test2IcEvT_iz(i8 noundef signext %0, i32 noundef 1, i32 noundef %conv)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testIiEvT_(i32 noundef %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIiEvT_(
+; CHECK-SAME: i32 noundef [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP3]], i8 -1, i64 4, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i32 [[TMP0]], ptr [[TMP6]], align 4
+; CHECK-NEXT:    store i32 [[ARG]], ptr [[ARG_ADDR]], align 4
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[ARG_ADDR]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i32 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (i32, i32, ...) @_Z5test2IiEvT_iz(i32 noundef [[TMP7]], i32 noundef 1, i32 noundef [[TMP7]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg.addr = alloca i32, align 4
+  store i32 %arg, ptr %arg.addr, align 4
+  call void @_Z3usePv(ptr noundef nonnull %arg.addr)
+  %0 = load i32, ptr %arg.addr, align 4
+  call void (i32, i32, ...) @_Z5test2IiEvT_iz(i32 noundef %0, i32 noundef 1, i32 noundef %0)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testIfEvT_(float noundef %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIfEvT_(
+; CHECK-SAME: float noundef [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG_ADDR:%.*]] = alloca float, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP3]], i8 -1, i64 4, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i32 [[TMP0]], ptr [[TMP6]], align 4
+; CHECK-NEXT:    store float [[ARG]], ptr [[ARG_ADDR]], align 4
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
+; CHECK-NEXT:    [[TMP7:%.*]] = load float, ptr [[ARG_ADDR]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[_MSLD]] to i64
+; CHECK-NEXT:    [[CONV:%.*]] = fpext float [[TMP7]] to double
+; CHECK-NEXT:    store i32 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 [[TMP11]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (float, i32, ...) @_Z5test2IfEvT_iz(float noundef [[TMP7]], i32 noundef 1, double noundef [[CONV]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg.addr = alloca float, align 4
+  store float %arg, ptr %arg.addr, align 4
+  call void @_Z3usePv(ptr noundef nonnull %arg.addr)
+  %0 = load float, ptr %arg.addr, align 4
+  %conv = fpext float %0 to double
+  call void (float, i32, ...) @_Z5test2IfEvT_iz(float noundef %0, i32 noundef 1, double noundef %conv)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testIdEvT_(double noundef %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIdEvT_(
+; CHECK-SAME: double noundef [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG_ADDR:%.*]] = alloca double, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 8, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i64 [[TMP0]], ptr [[TMP6]], align 8
+; CHECK-NEXT:    store double [[ARG]], ptr [[ARG_ADDR]], align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
+; CHECK-NEXT:    [[TMP7:%.*]] = load double, ptr [[ARG_ADDR]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (double, i32, ...) @_Z5test2IdEvT_iz(double noundef [[TMP7]], i32 noundef 1, double noundef [[TMP7]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg.addr = alloca double, align 8
+  store double %arg, ptr %arg.addr, align 8
+  call void @_Z3usePv(ptr noundef nonnull %arg.addr)
+  %0 = load double, ptr %arg.addr, align 8
+  call void (double, i32, ...) @_Z5test2IdEvT_iz(double noundef %0, i32 noundef 1, double noundef %0)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testIeEvT_(x86_fp80 noundef %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testIeEvT_(
+; CHECK-SAME: x86_fp80 noundef [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i80, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG_ADDR:%.*]] = alloca x86_fp80, align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP3]], i8 -1, i64 16, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i80 [[TMP0]], ptr [[TMP6]], align 16
+; CHECK-NEXT:    store x86_fp80 [[ARG]], ptr [[ARG_ADDR]], align 16
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG_ADDR]])
+; CHECK-NEXT:    [[TMP7:%.*]] = load x86_fp80, ptr [[ARG_ADDR]], align 16
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i80, ptr [[TMP10]], align 16
+; CHECK-NEXT:    store i80 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i80 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    store i80 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (x86_fp80, i32, ...) @_Z5test2IeEvT_iz(x86_fp80 noundef [[TMP7]], i32 noundef 1, x86_fp80 noundef [[TMP7]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg.addr = alloca x86_fp80, align 16
+  store x86_fp80 %arg, ptr %arg.addr, align 16
+  call void @_Z3usePv(ptr noundef nonnull %arg.addr)
+  %0 = load x86_fp80, ptr %arg.addr, align 16
+  call void (x86_fp80, i32, ...) @_Z5test2IeEvT_iz(x86_fp80 noundef %0, i32 noundef 1, x86_fp80 noundef %0)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(i64 %arg.coerce) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI6IntIntEvT_(
+; CHECK-SAME: i64 [[ARG_COERCE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_INTINT:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 8, i1 false)
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    store i64 [[TMP0]], ptr [[TMP6]], align 8
+; CHECK-NEXT:    store i64 [[ARG_COERCE]], ptr [[ARG]], align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP9]], align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (i64, i32, ...) @_Z5test2I6IntIntEvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg = alloca %struct.IntInt, align 8
+  store i64 %arg.coerce, ptr %arg, align 8
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load i64, ptr %arg, align 8
+  call void (i64, i32, ...) @_Z5test2I6IntIntEvT_iz(i64 %agg.tmp.sroa.0.0.copyload, i32 noundef 1, i64 %agg.tmp.sroa.0.0.copyload)
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(i64 %arg.coerce0, i64 %arg.coerce1) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI10Int64Int64EvT_(
+; CHECK-SAME: i64 [[ARG_COERCE0:%.*]], i64 [[ARG_COERCE1:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_INT64INT64:%.*]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP4]], i8 -1, i64 16, i1 false)
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i64 [[TMP0]], ptr [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[ARG_COERCE0]], ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[ARG]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store i64 [[TMP1]], ptr [[TMP11]], align 8
+; CHECK-NEXT:    store i64 [[ARG_COERCE1]], ptr [[TMP8]], align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP14]], align 8
+; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load i64, ptr [[TMP8]], align 8
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (i64, i64, i32, ...) @_Z5test2I10Int64Int64EvT_iz(i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg = alloca %struct.Int64Int64, align 8
+  store i64 %arg.coerce0, ptr %arg, align 8
+  %0 = getelementptr inbounds { i64, i64 }, ptr %arg, i64 0, i32 1
+  store i64 %arg.coerce1, ptr %0, align 8
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load i64, ptr %arg, align 8
+  %agg.tmp.sroa.2.0.copyload = load i64, ptr %0, align 8
+  call void (i64, i64, i32, ...) @_Z5test2I10Int64Int64EvT_iz(i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i32 noundef 1, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload)
+  ret void
+}
+
+; test<DoubleDouble>: the struct is passed as two coerced double scalars.
+; MSan loads i64 shadow for each field from __msan_param_tls (offsets 0, 8),
+; stores it into the local alloca's shadow, reloads it after the opaque use,
+; and forwards it both via param TLS and via __msan_va_arg_tls for the
+; variadic call. NOTE(review): the va_arg TLS offsets 80 and 96 correspond
+; to FP register-save slots -- this precommit pins the current behavior,
+; which the follow-up vararg improvements are expected to change.
+define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(double %arg.coerce0, double %arg.coerce1) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI12DoubleDoubleEvT_(
+; CHECK-SAME: double [[ARG_COERCE0:%.*]], double [[ARG_COERCE1:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_DOUBLEDOUBLE:%.*]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP4]], i8 -1, i64 16, i1 false)
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i64 [[TMP0]], ptr [[TMP7]], align 8
+; CHECK-NEXT:    store double [[ARG_COERCE0]], ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds { double, double }, ptr [[ARG]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store i64 [[TMP1]], ptr [[TMP11]], align 8
+; CHECK-NEXT:    store double [[ARG_COERCE1]], ptr [[TMP8]], align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load double, ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP14]], align 8
+; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load double, ptr [[TMP8]], align 8
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[_MSLD1:%.*]] = load i64, ptr [[TMP17]], align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (double, double, i32, ...) @_Z5test2I12DoubleDoubleEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], double [[AGG_TMP_SROA_2_0_COPYLOAD]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg = alloca %struct.DoubleDouble, align 8
+  store double %arg.coerce0, ptr %arg, align 8
+  %0 = getelementptr inbounds { double, double }, ptr %arg, i64 0, i32 1
+  store double %arg.coerce1, ptr %0, align 8
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load double, ptr %arg, align 8
+  %agg.tmp.sroa.2.0.copyload = load double, ptr %0, align 8
+  call void (double, double, i32, ...) @_Z5test2I12DoubleDoubleEvT_iz(double %agg.tmp.sroa.0.0.copyload, double %agg.tmp.sroa.2.0.copyload, i32 noundef 1, double %agg.tmp.sroa.0.0.copyload, double %agg.tmp.sroa.2.0.copyload)
+  ret void
+}
+
+; test<Double4>: a 32-byte struct passed byval. MSan copies the 32-byte
+; shadow out of __msan_param_tls into the argument's shadow, copies it back
+; out for the onward calls, and for the variadic call memcpy's it into
+; __msan_va_arg_tls at offset 176 (start of the stack/overflow region) while
+; setting __msan_va_arg_overflow_size_tls to 32.
+define linkonce_odr dso_local void @_Z4testI7Double4EvT_(ptr noundef byval(%struct.Double4) align 8 %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI7Double4EvT_(
+; CHECK-SAME: ptr noundef byval([[STRUCT_DOUBLE4:%.*]]) align 8 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
+; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_param_tls, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false)
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false)
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 32, i1 false)
+; CHECK-NEXT:    store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (ptr, i32, ...) @_Z5test2I7Double4EvT_iz(ptr noundef nonnull byval([[STRUCT_DOUBLE4]]) align 8 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_DOUBLE4]]) align 8 [[ARG]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  call void (ptr, i32, ...) @_Z5test2I7Double4EvT_iz(ptr noundef nonnull byval(%struct.Double4) align 8 %arg, i32 noundef 1, ptr noundef nonnull byval(%struct.Double4) align 8 %arg)
+  ret void
+}
+
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2
+
+; test<DoubleFloat>: mixed-width fields -- the double's shadow is i64, the
+; float's shadow is i32. Both are loaded from __msan_param_tls, round-tripped
+; through the alloca's shadow, and forwarded via param TLS and, for the
+; variadic call, __msan_va_arg_tls at offsets 80 and 96 (precommitted
+; behavior; see the vararg-improvement follow-up this test was added for).
+define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(double %arg.coerce0, float %arg.coerce1) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI11DoubleFloatEvT_(
+; CHECK-SAME: double [[ARG_COERCE0:%.*]], float [[ARG_COERCE1:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARG:%.*]] = alloca [[STRUCT_DOUBLEFLOAT:%.*]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP4]], i8 -1, i64 16, i1 false)
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i64 [[TMP0]], ptr [[TMP7]], align 8
+; CHECK-NEXT:    store double [[ARG_COERCE0]], ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds { double, float }, ptr [[ARG]], i64 0, i32 1
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store i32 [[TMP1]], ptr [[TMP11]], align 8
+; CHECK-NEXT:    store float [[ARG_COERCE1]], ptr [[TMP8]], align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load double, ptr [[ARG]], align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP14]], align 8
+; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load float, ptr [[TMP8]], align 8
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[TMP8]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    [[_MSLD1:%.*]] = load i32, ptr [[TMP17]], align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 80) to ptr), align 8
+; CHECK-NEXT:    store i32 [[_MSLD1]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 96) to ptr), align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (double, float, i32, ...) @_Z5test2I11DoubleFloatEvT_iz(double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]], i32 noundef 1, double [[AGG_TMP_SROA_0_0_COPYLOAD]], float [[AGG_TMP_SROA_2_0_COPYLOAD]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arg = alloca %struct.DoubleFloat, align 8
+  store double %arg.coerce0, ptr %arg, align 8
+  %0 = getelementptr inbounds { double, float }, ptr %arg, i64 0, i32 1
+  store float %arg.coerce1, ptr %0, align 8
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  %agg.tmp.sroa.0.0.copyload = load double, ptr %arg, align 8
+  %agg.tmp.sroa.2.0.copyload = load float, ptr %0, align 8
+  call void (double, float, i32, ...) @_Z5test2I11DoubleFloatEvT_iz(double %agg.tmp.sroa.0.0.copyload, float %agg.tmp.sroa.2.0.copyload, i32 noundef 1, double %agg.tmp.sroa.0.0.copyload, float %agg.tmp.sroa.2.0.copyload)
+  ret void
+}
+
+; test<LongDouble2>: two x86 long doubles passed byval (32 bytes, align 16).
+; Same shape as the Double4 byval case: 32-byte shadow memcpy from/to
+; __msan_param_tls, plus a copy into __msan_va_arg_tls at offset 176 with
+; __msan_va_arg_overflow_size_tls set to 32 for the variadic call.
+define linkonce_odr dso_local void @_Z4testI11LongDouble2EvT_(ptr noundef byval(%struct.LongDouble2) align 16 %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI11LongDouble2EvT_(
+; CHECK-SAME: ptr noundef byval([[STRUCT_LONGDOUBLE2:%.*]]) align 16 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
+; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_param_tls, i64 32, i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 32, i1 false)
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP8]], i64 32, i1 false)
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 32, i1 false)
+; CHECK-NEXT:    store i64 32, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (ptr, i32, ...) @_Z5test2I11LongDouble2EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE2]]) align 16 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE2]]) align 16 [[ARG]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  call void (ptr, i32, ...) @_Z5test2I11LongDouble2EvT_iz(ptr noundef nonnull byval(%struct.LongDouble2) align 16 %arg, i32 noundef 1, ptr noundef nonnull byval(%struct.LongDouble2) align 16 %arg)
+  ret void
+}
+
+; test<LongDouble4>: four x86 long doubles passed byval (64 bytes, align 16).
+; Identical pattern to LongDouble2 but with 64-byte shadow memcpys; the
+; variadic call writes the shadow to __msan_va_arg_tls at offset 176 and
+; sets __msan_va_arg_overflow_size_tls to 64.
+define linkonce_odr dso_local void @_Z4testI11LongDouble4EvT_(ptr noundef byval(%struct.LongDouble4) align 16 %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4testI11LongDouble4EvT_(
+; CHECK-SAME: ptr noundef byval([[STRUCT_LONGDOUBLE4:%.*]]) align 16 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
+; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_param_tls, i64 64, i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false)
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP11]], i64 64, i1 false)
+; CHECK-NEXT:    store i64 64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (ptr, i32, ...) @_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], i32 noundef 1, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  call void (ptr, i32, ...) @_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, i32 noundef 1, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg)
+  ret void
+}
+
+declare void @_Z3usePv(ptr noundef) local_unnamed_addr #3
+
+; test2<char> (variadic callee): MSan sizes a local shadow buffer as
+; 176 + __msan_va_arg_overflow_size_tls, zeroes it, and copies in at most
+; 800 bytes (capped via llvm.umin) from __msan_va_arg_tls. After va_start it
+; copies the first 176 shadow bytes to the region pointed to by va_list
+; offset 16 and the remaining overflow-size bytes to the region at va_list
+; offset 8. NOTE(review): offsets 16/8 presumably address the x86-64
+; va_list_tag reg_save_area / overflow_arg_area fields -- confirm against
+; the System V ABI layout.
+define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IcEvT_iz(
+; CHECK-SAME: i8 noundef signext [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4
+
+declare void @llvm.va_start(ptr) #5
+
+declare void @llvm.va_end(ptr) #5
+
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #4
+
+; test2<int> (variadic callee): the instrumented body is identical to the
+; char variant -- the vararg shadow is copied wholesale from
+; __msan_va_arg_tls (capped at 800 bytes) into the va_list's two shadow
+; regions, independent of the fixed parameter's type.
+define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz(
+; CHECK-SAME: i32 noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+; test2<float> (variadic callee): same instrumentation as the char/int
+; variants -- the va_arg shadow copy is type-agnostic; only the fixed
+; first parameter differs (float here).
+define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IfEvT_iz(
+; CHECK-SAME: float noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+; test2<double> (variadic callee): same instrumentation as the other scalar
+; variants -- the va_arg shadow copy is type-agnostic; only the fixed
+; first parameter differs (double here).
+define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IdEvT_iz(
+; CHECK-SAME: double noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+; test2<long double> (x86_fp80): builds a va_list and passes it to @_Z3usePv.
+; Auto-generated CHECK lines: MSan copies vararg shadow from @__msan_va_arg_tls
+; into a local buffer (176 bytes -- the x86-64 SysV register-save area size --
+; plus the overflow size loaded from @__msan_va_arg_overflow_size_tls) and
+; writes it over the va_list's reg_save_area and overflow_arg_area after va_start.
+define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IeEvT_iz(
+; CHECK-SAME: x86_fp80 noundef [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+; test2<IntInt> (struct coerced to a single i64): builds a va_list and passes
+; it to @_Z3usePv. Auto-generated CHECK lines: MSan copies vararg shadow from
+; @__msan_va_arg_tls into a local buffer (176 bytes -- x86-64 SysV
+; register-save area size -- plus the overflow size) and writes it over the
+; va_list's reg_save_area and overflow_arg_area after va_start.
+define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(
+; CHECK-SAME: i64 [[T_COERCE:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+; test2<Int64Int64> (struct coerced to two i64s): builds a va_list and passes
+; it to @_Z3usePv. Auto-generated CHECK lines: MSan copies vararg shadow from
+; @__msan_va_arg_tls into a local buffer (176 bytes -- x86-64 SysV
+; register-save area size -- plus the overflow size) and writes it over the
+; va_list's reg_save_area and overflow_arg_area after va_start.
+define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0, i64 %t.coerce1, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(
+; CHECK-SAME: i64 [[T_COERCE0:%.*]], i64 [[T_COERCE1:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+; test2<DoubleDouble> (struct coerced to two doubles): builds a va_list and
+; passes it to @_Z3usePv. Auto-generated CHECK lines: MSan copies vararg
+; shadow from @__msan_va_arg_tls into a local buffer (176 bytes -- x86-64
+; SysV register-save area size -- plus the overflow size) and writes it over
+; the va_list's reg_save_area and overflow_arg_area after va_start.
+define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coerce0, double %t.coerce1, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(
+; CHECK-SAME: double [[T_COERCE0:%.*]], double [[T_COERCE1:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+; test2<Double4> (struct passed indirectly via byval): builds a va_list and
+; passes it to @_Z3usePv. Auto-generated CHECK lines: MSan copies vararg
+; shadow from @__msan_va_arg_tls into a local buffer (176 bytes -- x86-64
+; SysV register-save area size -- plus the overflow size) and writes it over
+; the va_list's reg_save_area and overflow_arg_area after va_start.
+define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%struct.Double4) align 8 %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(
+; CHECK-SAME: ptr noundef byval([[STRUCT_DOUBLE4:%.*]]) align 8 [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+; test2<DoubleFloat> (struct coerced to double + float): builds a va_list and
+; passes it to @_Z3usePv. Auto-generated CHECK lines: MSan copies vararg
+; shadow from @__msan_va_arg_tls into a local buffer (176 bytes -- x86-64
+; SysV register-save area size -- plus the overflow size) and writes it over
+; the va_list's reg_save_area and overflow_arg_area after va_start.
+define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerce0, float %t.coerce1, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(
+; CHECK-SAME: double [[T_COERCE0:%.*]], float [[T_COERCE1:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+; test2<LongDouble2> (struct passed indirectly via byval, align 16): builds a
+; va_list and passes it to @_Z3usePv. Auto-generated CHECK lines: MSan copies
+; vararg shadow from @__msan_va_arg_tls into a local buffer (176 bytes --
+; x86-64 SysV register-save area size -- plus the overflow size) and writes it
+; over the va_list's reg_save_area and overflow_arg_area after va_start.
+define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byval(%struct.LongDouble2) align 16 %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(
+; CHECK-SAME: ptr noundef byval([[STRUCT_LONGDOUBLE2:%.*]]) align 16 [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+; test2<LongDouble4> (struct passed indirectly via byval, align 16): builds a
+; va_list and passes it to @_Z3usePv. Auto-generated CHECK lines: MSan copies
+; vararg shadow from @__msan_va_arg_tls into a local buffer (176 bytes --
+; x86-64 SysV register-save area size -- plus the overflow size) and writes it
+; over the va_list's reg_save_area and overflow_arg_area after va_start.
+define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byval(%struct.LongDouble4) align 16 %t, i32 noundef %n, ...) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(
+; CHECK-SAME: ptr noundef byval([[STRUCT_LONGDOUBLE4:%.*]]) align 16 [[T:%.*]], i32 noundef [[N:%.*]], ...) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 -1, i64 24, i1 false)
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 24, i1 false)
+; CHECK-NEXT:    call void @llvm.va_start(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 16
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP16]], ptr align 16 [[TMP2]], i64 176, i1 false)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[ARGS]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP24]], i64 [[TMP0]], i1 false)
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.va_end(ptr nonnull [[ARGS]])
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %args = alloca [1 x %struct.__va_list_tag], align 16
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+  call void @llvm.va_start(ptr nonnull %args)
+  call void @_Z3usePv(ptr noundef nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+  ret void
+}
+
+define linkonce_odr dso_local void @_Z4test3I11LongDouble4EvT_(ptr noundef byval(%struct.LongDouble4) align 16 %arg) sanitize_memory {
+; CHECK-LABEL: define linkonce_odr dso_local void @_Z4test3I11LongDouble4EvT_(
+; CHECK-SAME: ptr noundef byval([[STRUCT_LONGDOUBLE4:%.*]]) align 16 [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
+; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_param_tls, i64 64, i1 false)
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @_Z3usePv(ptr noundef nonnull [[ARG]])
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_param_tls, ptr align 8 [[TMP5]], i64 64, i1 false)
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), ptr align 8 [[TMP8]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), ptr align 8 [[TMP11]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 200) to ptr), ptr align 8 [[TMP14]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), ptr align 8 [[TMP17]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = xor i64 [[TMP18]], 87960930222080
+; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 328) to ptr), ptr align 8 [[TMP20]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 392) to ptr), ptr align 8 [[TMP23]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP25:%.*]] = xor i64 [[TMP24]], 87960930222080
+; CHECK-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 456) to ptr), ptr align 8 [[TMP26]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP27:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP28:%.*]] = xor i64 [[TMP27]], 87960930222080
+; CHECK-NEXT:    [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 520) to ptr), ptr align 8 [[TMP29]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP31:%.*]] = xor i64 [[TMP30]], 87960930222080
+; CHECK-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 584) to ptr), ptr align 8 [[TMP32]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP33:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP34:%.*]] = xor i64 [[TMP33]], 87960930222080
+; CHECK-NEXT:    [[TMP35:%.*]] = inttoptr i64 [[TMP34]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 648) to ptr), ptr align 8 [[TMP35]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP36:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP37:%.*]] = xor i64 [[TMP36]], 87960930222080
+; CHECK-NEXT:    [[TMP38:%.*]] = inttoptr i64 [[TMP37]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 712) to ptr), ptr align 8 [[TMP38]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP39:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP40:%.*]] = xor i64 [[TMP39]], 87960930222080
+; CHECK-NEXT:    [[TMP41:%.*]] = inttoptr i64 [[TMP40]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP41]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP42:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP43:%.*]] = xor i64 [[TMP42]], 87960930222080
+; CHECK-NEXT:    [[TMP44:%.*]] = inttoptr i64 [[TMP43]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 240) to ptr), ptr align 8 [[TMP44]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP45:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP46:%.*]] = xor i64 [[TMP45]], 87960930222080
+; CHECK-NEXT:    [[TMP47:%.*]] = inttoptr i64 [[TMP46]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 304) to ptr), ptr align 8 [[TMP47]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP48:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP49:%.*]] = xor i64 [[TMP48]], 87960930222080
+; CHECK-NEXT:    [[TMP50:%.*]] = inttoptr i64 [[TMP49]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 368) to ptr), ptr align 8 [[TMP50]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP51:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP52:%.*]] = xor i64 [[TMP51]], 87960930222080
+; CHECK-NEXT:    [[TMP53:%.*]] = inttoptr i64 [[TMP52]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 432) to ptr), ptr align 8 [[TMP53]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP54:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP55:%.*]] = xor i64 [[TMP54]], 87960930222080
+; CHECK-NEXT:    [[TMP56:%.*]] = inttoptr i64 [[TMP55]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 496) to ptr), ptr align 8 [[TMP56]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP57:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP58:%.*]] = xor i64 [[TMP57]], 87960930222080
+; CHECK-NEXT:    [[TMP59:%.*]] = inttoptr i64 [[TMP58]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 560) to ptr), ptr align 8 [[TMP59]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP60:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP61:%.*]] = xor i64 [[TMP60]], 87960930222080
+; CHECK-NEXT:    [[TMP62:%.*]] = inttoptr i64 [[TMP61]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 624) to ptr), ptr align 8 [[TMP62]], i64 64, i1 false)
+; CHECK-NEXT:    [[TMP63:%.*]] = ptrtoint ptr [[ARG]] to i64
+; CHECK-NEXT:    [[TMP64:%.*]] = xor i64 [[TMP63]], 87960930222080
+; CHECK-NEXT:    [[TMP65:%.*]] = inttoptr i64 [[TMP64]] to ptr
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 688) to ptr), ptr align 8 [[TMP65]], i64 64, i1 false)
+; CHECK-NEXT:    store i64 1280, ptr @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    call void (ptr, i32, ...) @_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], i32 noundef 20, ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]], ptr noundef nonnull byval([[STRUCT_LONGDOUBLE4]]) align 16 [[ARG]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @_Z3usePv(ptr noundef nonnull %arg)
+  call void (ptr, i32, ...) @_Z5test2I11LongDouble4EvT_iz(ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, i32 noundef 20, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg, ptr noundef nonnull byval(%struct.LongDouble4) align 16 %arg)
+  ret void
+}


        


More information about the llvm-commits mailing list