[clang] [llvm] [msan] Implement support for Arm NEON vst{2,3,4} instructions (PR #99360)

Thurston Dang via cfe-commits cfe-commits at lists.llvm.org
Wed Jul 17 12:51:07 PDT 2024


https://github.com/thurstond updated https://github.com/llvm/llvm-project/pull/99360

>From 5550f59aa9e7980bc4758878faae6173a5491f45 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 3 Jul 2024 22:45:45 +0000
Subject: [PATCH 01/14] [msan] Implement support for Arm NEON vst{2,3,4}
 instructions

This adds support for vst{2,3,4}, which are not correctly handled by
handleUnknownIntrinsic/handleVector{Load,Store}Intrinsic.
---
 .../CodeGen/aarch64-neon-intrinsics-msan.c    | 7595 +++++++++++++++++
 .../Instrumentation/MemorySanitizer.cpp       |  143 +
 2 files changed, 7738 insertions(+)
 create mode 100644 clang/test/CodeGen/aarch64-neon-intrinsics-msan.c

diff --git a/clang/test/CodeGen/aarch64-neon-intrinsics-msan.c b/clang/test/CodeGen/aarch64-neon-intrinsics-msan.c
new file mode 100644
index 0000000000000..4d2c9975dd1cf
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-neon-intrinsics-msan.c
@@ -0,0 +1,7595 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
+// RUN:     -S -disable-O0-optnone \
+// RUN:  -flax-vector-conversions=none -emit-llvm -o - %s \
+// RUN: | opt -S -passes=mem2reg \
+// RUN: | opt -S -passes=msan \
+// RUN: | FileCheck %s
+
+// REQUIRES: aarch64-registered-target || arm-registered-target
+
+// Forked from aarch64-neon-intrinsics.c
+
+#include <arm_neon.h>
+
+// CHECK-LABEL: define dso_local <16 x i8> @test_vld1q_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A]], align 1
+// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_vld1q_u8(uint8_t const *a) {
+  return vld1q_u8(a);
+}
+
+// CHECK-LABEL: define dso_local <8 x i16> @test_vld1q_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr [[A]], align 2
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vld1q_u16(uint16_t const *a) {
+  return vld1q_u16(a);
+}
+
+// CHECK-LABEL: define dso_local <4 x i32> @test_vld1q_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A]], align 4
+// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+uint32x4_t test_vld1q_u32(uint32_t const *a) {
+  return vld1q_u32(a);
+}
+
+// CHECK-LABEL: define dso_local <2 x i64> @test_vld1q_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i64>, ptr [[A]], align 8
+// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <2 x i64> [[TMP0]]
+//
+uint64x2_t test_vld1q_u64(uint64_t const *a) {
+  return vld1q_u64(a);
+}
+
+// CHECK-LABEL: define dso_local <16 x i8> @test_vld1q_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A]], align 1
+// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vld1q_s8(int8_t const *a) {
+  return vld1q_s8(a);
+}
+
+// CHECK-LABEL: define dso_local <8 x i16> @test_vld1q_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr [[A]], align 2
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vld1q_s16(int16_t const *a) {
+  return vld1q_s16(a);
+}
+
+// CHECK-LABEL: define dso_local <4 x i32> @test_vld1q_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A]], align 4
+// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vld1q_s32(int32_t const *a) {
+  return vld1q_s32(a);
+}
+
+// CHECK-LABEL: define dso_local <2 x i64> @test_vld1q_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i64>, ptr [[A]], align 8
+// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <2 x i64> [[TMP0]]
+//
+int64x2_t test_vld1q_s64(int64_t const *a) {
+  return vld1q_s64(a);
+}
+
+// CHECK-LABEL: define dso_local <8 x half> @test_vld1q_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x half>, ptr [[A]], align 2
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <8 x half> [[TMP0]]
+//
+float16x8_t test_vld1q_f16(float16_t const *a) {
+  return vld1q_f16(a);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_vld1q_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[A]], align 4
+// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vld1q_f32(float32_t const *a) {
+  return vld1q_f32(a);
+}
+
+// CHECK-LABEL: define dso_local <2 x double> @test_vld1q_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[A]], align 8
+// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <2 x double> [[TMP0]]
+//
+float64x2_t test_vld1q_f64(float64_t const *a) {
+  return vld1q_f64(a);
+}
+
+// CHECK-LABEL: define dso_local <16 x i8> @test_vld1q_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A]], align 1
+// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
+//
+poly8x16_t test_vld1q_p8(poly8_t const *a) {
+  return vld1q_p8(a);
+}
+
+// CHECK-LABEL: define dso_local <8 x i16> @test_vld1q_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr [[A]], align 2
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+poly16x8_t test_vld1q_p16(poly16_t const *a) {
+  return vld1q_p16(a);
+}
+
+// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
+// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
+//
+uint8x8_t test_vld1_u8(uint8_t const *a) {
+  return vld1_u8(a);
+}
+
+// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 2
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
+//
+uint16x4_t test_vld1_u16(uint16_t const *a) {
+  return vld1_u16(a);
+}
+
+// CHECK-LABEL: define dso_local <2 x i32> @test_vld1_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[A]], align 4
+// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <2 x i32> [[TMP0]]
+//
+uint32x2_t test_vld1_u32(uint32_t const *a) {
+  return vld1_u32(a);
+}
+
+// CHECK-LABEL: define dso_local <1 x i64> @test_vld1_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x i64>, ptr [[A]], align 8
+// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <1 x i64> [[TMP0]]
+//
+uint64x1_t test_vld1_u64(uint64_t const *a) {
+  return vld1_u64(a);
+}
+
+// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
+// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
+//
+int8x8_t test_vld1_s8(int8_t const *a) {
+  return vld1_s8(a);
+}
+
+// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 2
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
+//
+int16x4_t test_vld1_s16(int16_t const *a) {
+  return vld1_s16(a);
+}
+
+// CHECK-LABEL: define dso_local <2 x i32> @test_vld1_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[A]], align 4
+// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <2 x i32> [[TMP0]]
+//
+int32x2_t test_vld1_s32(int32_t const *a) {
+  return vld1_s32(a);
+}
+
+// CHECK-LABEL: define dso_local <1 x i64> @test_vld1_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x i64>, ptr [[A]], align 8
+// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <1 x i64> [[TMP0]]
+//
+int64x1_t test_vld1_s64(int64_t const *a) {
+  return vld1_s64(a);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_vld1_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x half>, ptr [[A]], align 2
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <4 x half> [[TMP0]]
+//
+float16x4_t test_vld1_f16(float16_t const *a) {
+  return vld1_f16(a);
+}
+
+// CHECK-LABEL: define dso_local <2 x float> @test_vld1_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[A]], align 4
+// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <2 x float> [[TMP0]]
+//
+float32x2_t test_vld1_f32(float32_t const *a) {
+  return vld1_f32(a);
+}
+
+// CHECK-LABEL: define dso_local <1 x double> @test_vld1_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x double>, ptr [[A]], align 8
+// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <1 x double> [[TMP0]]
+//
+float64x1_t test_vld1_f64(float64_t const *a) {
+  return vld1_f64(a);
+}
+
+// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
+// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
+//
+poly8x8_t test_vld1_p8(poly8_t const *a) {
+  return vld1_p8(a);
+}
+
+// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 2
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
+//
+poly16x4_t test_vld1_p16(poly16_t const *a) {
+  return vld1_p16(a);
+}
+
+// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_u8_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
+// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
+//
+uint8x8_t test_vld1_u8_void(void *a) {
+  return vld1_u8(a);
+}
+
+// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_u16_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 1
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
+//
+uint16x4_t test_vld1_u16_void(void *a) {
+  return vld1_u16(a);
+}
+
+// CHECK-LABEL: define dso_local <2 x i32> @test_vld1_u32_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[A]], align 1
+// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <2 x i32> [[TMP0]]
+//
+uint32x2_t test_vld1_u32_void(void *a) {
+  return vld1_u32(a);
+}
+
+// CHECK-LABEL: define dso_local <1 x i64> @test_vld1_u64_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x i64>, ptr [[A]], align 1
+// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <1 x i64> [[TMP0]]
+//
+uint64x1_t test_vld1_u64_void(void *a) {
+  return vld1_u64(a);
+}
+
+// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_s8_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
+// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
+//
+int8x8_t test_vld1_s8_void(void *a) {
+  return vld1_s8(a);
+}
+
+// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_s16_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 1
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
+//
+int16x4_t test_vld1_s16_void(void *a) {
+  return vld1_s16(a);
+}
+
+// CHECK-LABEL: define dso_local <2 x i32> @test_vld1_s32_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[A]], align 1
+// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <2 x i32> [[TMP0]]
+//
+int32x2_t test_vld1_s32_void(void *a) {
+  return vld1_s32(a);
+}
+
+// CHECK-LABEL: define dso_local <1 x i64> @test_vld1_s64_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x i64>, ptr [[A]], align 1
+// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <1 x i64> [[TMP0]]
+//
+int64x1_t test_vld1_s64_void(void *a) {
+  return vld1_s64(a);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_vld1_f16_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x half>, ptr [[A]], align 1
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <4 x half> [[TMP0]]
+//
+float16x4_t test_vld1_f16_void(void *a) {
+  return vld1_f16(a);
+}
+
+// CHECK-LABEL: define dso_local <2 x float> @test_vld1_f32_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[A]], align 1
+// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <2 x float> [[TMP0]]
+//
+float32x2_t test_vld1_f32_void(void *a) {
+  return vld1_f32(a);
+}
+
+// CHECK-LABEL: define dso_local <1 x double> @test_vld1_f64_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x double>, ptr [[A]], align 1
+// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <1 x double> [[TMP0]]
+//
+float64x1_t test_vld1_f64_void(void *a) {
+  return vld1_f64(a);
+}
+
+// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_p8_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
+// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
+//
+poly8x8_t test_vld1_p8_void(void *a) {
+  return vld1_p8(a);
+}
+
+// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_p16_void(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 1
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
+//
+poly16x4_t test_vld1_p16_void(void *a) {
+  return vld1_p16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint8x16x2_t @test_vld2q_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X16X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X16X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X16X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT8X16X2_T]] [[TMP10]]
+//
+uint8x16x2_t test_vld2q_u8(uint8_t const *a) {
+  return vld2q_u8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint16x8x2_t @test_vld2q_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X8X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X8X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X8X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT16X8X2_T]] [[TMP10]]
+//
+uint16x8x2_t test_vld2q_u16(uint16_t const *a) {
+  return vld2q_u16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint32x4x2_t @test_vld2q_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X4X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X4X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X4X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT32X4X2_T]] [[TMP10]]
+//
+uint32x4x2_t test_vld2q_u32(uint32_t const *a) {
+  return vld2q_u32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint64x2x2_t @test_vld2q_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X2X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X2X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X2X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT64X2X2_T]] [[TMP10]]
+//
+uint64x2x2_t test_vld2q_u64(uint64_t const *a) {
+  return vld2q_u64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int8x16x2_t @test_vld2q_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X16X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X16X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X16X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT8X16X2_T]] [[TMP10]]
+//
+int8x16x2_t test_vld2q_s8(int8_t const *a) {
+  return vld2q_s8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int16x8x2_t @test_vld2q_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X8X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X8X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X8X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT16X8X2_T]] [[TMP10]]
+//
+int16x8x2_t test_vld2q_s16(int16_t const *a) {
+  return vld2q_s16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int32x4x2_t @test_vld2q_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X4X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X4X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X4X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT32X4X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2q_s32 (NEON 2-register interleaved load,
+// 128-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+int32x4x2_t test_vld2q_s32(int32_t const *a) {
+  return vld2q_s32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int64x2x2_t @test_vld2q_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X2X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X2X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X2X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT64X2X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2q_s64 (NEON 2-register interleaved load,
+// 128-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+int64x2x2_t test_vld2q_s64(int64_t const *a) {
+  return vld2q_s64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float16x8x2_t @test_vld2q_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x half>, <8 x half> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X8X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT16X8X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2q_f16; note the shadow is stored with the
+// integer type { <8 x i16>, <8 x i16> } for the <8 x half> payload (see the
+// autogenerated CHECK lines above).
+float16x8x2_t test_vld2q_f16(float16_t const *a) {
+  return vld2q_f16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float32x4x2_t @test_vld2q_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X4X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X4X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <4 x float>, <4 x float> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X4X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT32X4X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2q_f32; note the shadow is stored with the
+// integer type { <4 x i32>, <4 x i32> } for the <4 x float> payload (see the
+// autogenerated CHECK lines above).
+float32x4x2_t test_vld2q_f32(float32_t const *a) {
+  return vld2q_f32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float64x2x2_t @test_vld2q_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x double>, <2 x double> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2q_f64; note the shadow is stored with the
+// integer type { <2 x i64>, <2 x i64> } for the <2 x double> payload (see the
+// autogenerated CHECK lines above).
+float64x2x2_t test_vld2q_f64(float64_t const *a) {
+  return vld2q_f64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly8x16x2_t @test_vld2q_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X16X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X16X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X16X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY8X16X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2q_p8 (NEON 2-register interleaved load,
+// 128-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+poly8x16x2_t test_vld2q_p8(poly8_t const *a) {
+  return vld2q_p8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly16x8x2_t @test_vld2q_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X8X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X8X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } [[VLD2]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X8X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY16X8X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2q_p16 (NEON 2-register interleaved load,
+// 128-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+poly16x8x2_t test_vld2q_p16(poly16_t const *a) {
+  return vld2q_p16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint8x8x2_t @test_vld2_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X8X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X8X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X8X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT8X8X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2_u8 (NEON 2-register interleaved load,
+// 64-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+uint8x8x2_t test_vld2_u8(uint8_t const *a) {
+  return vld2_u8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint16x4x2_t @test_vld2_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X4X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X4X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X4X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT16X4X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2_u16 (NEON 2-register interleaved load,
+// 64-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+uint16x4x2_t test_vld2_u16(uint16_t const *a) {
+  return vld2_u16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint32x2x2_t @test_vld2_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X2X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X2X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X2X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT32X2X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2_u32 (NEON 2-register interleaved load,
+// 64-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+uint32x2x2_t test_vld2_u32(uint32_t const *a) {
+  return vld2_u32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint64x1x2_t @test_vld2_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X1X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X1X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X1X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT64X1X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2_u64 (NEON 2-register interleaved load,
+// 64-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+uint64x1x2_t test_vld2_u64(uint64_t const *a) {
+  return vld2_u64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int8x8x2_t @test_vld2_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X8X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X8X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X8X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT8X8X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2_s8 (NEON 2-register interleaved load,
+// 64-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+int8x8x2_t test_vld2_s8(int8_t const *a) {
+  return vld2_s8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int16x4x2_t @test_vld2_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X4X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X4X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X4X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT16X4X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2_s16 (NEON 2-register interleaved load,
+// 64-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+int16x4x2_t test_vld2_s16(int16_t const *a) {
+  return vld2_s16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int32x2x2_t @test_vld2_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X2X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X2X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X2X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT32X2X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2_s32 (NEON 2-register interleaved load,
+// 64-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+int32x2x2_t test_vld2_s32(int32_t const *a) {
+  return vld2_s32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int64x1x2_t @test_vld2_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X1X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X1X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X1X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT64X1X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2_s64 (NEON 2-register interleaved load,
+// 64-bit regs); the autogenerated CHECK lines above pin the shadow stores.
+int64x1x2_t test_vld2_s64(int64_t const *a) {
+  return vld2_s64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float16x4x2_t @test_vld2_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x half>, <4 x half> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X4X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT16X4X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2_f16; note the shadow is stored with the
+// integer type { <4 x i16>, <4 x i16> } for the <4 x half> payload (see the
+// autogenerated CHECK lines above).
+float16x4x2_t test_vld2_f16(float16_t const *a) {
+  return vld2_f16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float32x2x2_t @test_vld2_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X2X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X2X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <2 x float>, <2 x float> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X2X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT32X2X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2_f32; note the shadow is stored with the
+// integer type { <2 x i32>, <2 x i32> } for the <2 x float> payload (see the
+// autogenerated CHECK lines above).
+float32x2x2_t test_vld2_f32(float32_t const *a) {
+  return vld2_f32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float64x1x2_t @test_vld2_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x double>, <1 x double> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X2_T]] [[TMP10]]
+//
+// MSan instrumentation of vld2_f64; note the shadow is stored with the
+// integer type { <1 x i64>, <1 x i64> } for the <1 x double> payload (see the
+// autogenerated CHECK lines above).
+float64x1x2_t test_vld2_f64(float64_t const *a) {
+  return vld2_f64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly8x8x2_t @test_vld2_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X8X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X8X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X8X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY8X8X2_T]] [[TMP10]]
+//
+poly8x8x2_t test_vld2_p8(poly8_t const *a) {
+  return vld2_p8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly16x4x2_t @test_vld2_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X4X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X4X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } [[VLD2]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X4X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY16X4X2_T]] [[TMP10]]
+//
+poly16x4x2_t test_vld2_p16(poly16_t const *a) {
+  return vld2_p16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint8x16x3_t @test_vld3q_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X16X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X16X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X16X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT8X16X3_T]] [[TMP10]]
+//
+uint8x16x3_t test_vld3q_u8(uint8_t const *a) {
+  return vld3q_u8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint16x8x3_t @test_vld3q_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X8X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X8X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X8X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT16X8X3_T]] [[TMP10]]
+//
+uint16x8x3_t test_vld3q_u16(uint16_t const *a) {
+  return vld3q_u16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint32x4x3_t @test_vld3q_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X4X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X4X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X4X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT32X4X3_T]] [[TMP10]]
+//
+uint32x4x3_t test_vld3q_u32(uint32_t const *a) {
+  return vld3q_u32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint64x2x3_t @test_vld3q_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X2X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X2X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X2X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT64X2X3_T]] [[TMP10]]
+//
+uint64x2x3_t test_vld3q_u64(uint64_t const *a) {
+  return vld3q_u64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int8x16x3_t @test_vld3q_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X16X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X16X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X16X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT8X16X3_T]] [[TMP10]]
+//
+int8x16x3_t test_vld3q_s8(int8_t const *a) {
+  return vld3q_s8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int16x8x3_t @test_vld3q_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X8X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X8X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X8X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT16X8X3_T]] [[TMP10]]
+//
+int16x8x3_t test_vld3q_s16(int16_t const *a) {
+  return vld3q_s16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int32x4x3_t @test_vld3q_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X4X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X4X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X4X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT32X4X3_T]] [[TMP10]]
+//
+int32x4x3_t test_vld3q_s32(int32_t const *a) {
+  return vld3q_s32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int64x2x3_t @test_vld3q_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X2X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X2X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X2X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT64X2X3_T]] [[TMP10]]
+//
+int64x2x3_t test_vld3q_s64(int64_t const *a) {
+  return vld3q_s64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float16x8x3_t @test_vld3q_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X8X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X8X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x half>, <8 x half>, <8 x half> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X8X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT16X8X3_T]] [[TMP10]]
+//
+float16x8x3_t test_vld3q_f16(float16_t const *a) {
+  return vld3q_f16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float32x4x3_t @test_vld3q_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X4X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X4X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <4 x float>, <4 x float>, <4 x float> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X4X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT32X4X3_T]] [[TMP10]]
+//
+float32x4x3_t test_vld3q_f32(float32_t const *a) {
+  return vld3q_f32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float64x2x3_t @test_vld3q_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x double>, <2 x double>, <2 x double> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X3_T]] [[TMP10]]
+//
+float64x2x3_t test_vld3q_f64(float64_t const *a) {
+  return vld3q_f64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly8x16x3_t @test_vld3q_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X16X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X16X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X16X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY8X16X3_T]] [[TMP10]]
+//
+poly8x16x3_t test_vld3q_p8(poly8_t const *a) {
+  return vld3q_p8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly16x8x3_t @test_vld3q_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X8X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X8X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X8X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY16X8X3_T]] [[TMP10]]
+//
+poly16x8x3_t test_vld3q_p16(poly16_t const *a) {
+  return vld3q_p16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint8x8x3_t @test_vld3_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X8X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X8X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X8X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT8X8X3_T]] [[TMP10]]
+//
+uint8x8x3_t test_vld3_u8(uint8_t const *a) {
+  return vld3_u8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint16x4x3_t @test_vld3_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X4X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X4X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X4X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT16X4X3_T]] [[TMP10]]
+//
+uint16x4x3_t test_vld3_u16(uint16_t const *a) {
+  return vld3_u16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint32x2x3_t @test_vld3_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X2X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X2X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X2X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT32X2X3_T]] [[TMP10]]
+//
+uint32x2x3_t test_vld3_u32(uint32_t const *a) {
+  return vld3_u32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint64x1x3_t @test_vld3_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X1X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X1X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X1X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT64X1X3_T]] [[TMP10]]
+//
+uint64x1x3_t test_vld3_u64(uint64_t const *a) {
+  return vld3_u64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int8x8x3_t @test_vld3_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X8X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X8X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X8X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT8X8X3_T]] [[TMP10]]
+//
+int8x8x3_t test_vld3_s8(int8_t const *a) {
+  return vld3_s8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int16x4x3_t @test_vld3_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X4X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X4X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X4X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT16X4X3_T]] [[TMP10]]
+//
+int16x4x3_t test_vld3_s16(int16_t const *a) {
+  return vld3_s16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int32x2x3_t @test_vld3_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X2X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X2X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X2X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT32X2X3_T]] [[TMP10]]
+//
+int32x2x3_t test_vld3_s32(int32_t const *a) {
+  return vld3_s32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int64x1x3_t @test_vld3_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X1X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X1X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X1X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT64X1X3_T]] [[TMP10]]
+//
+int64x1x3_t test_vld3_s64(int64_t const *a) {
+  return vld3_s64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float16x4x3_t @test_vld3_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X4X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X4X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x half>, <4 x half>, <4 x half> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X4X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT16X4X3_T]] [[TMP10]]
+//
+float16x4x3_t test_vld3_f16(float16_t const *a) {
+  return vld3_f16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float32x2x3_t @test_vld3_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X2X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X2X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <2 x float>, <2 x float>, <2 x float> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X2X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT32X2X3_T]] [[TMP10]]
+//
+float32x2x3_t test_vld3_f32(float32_t const *a) {
+  return vld3_f32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float64x1x3_t @test_vld3_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x double>, <1 x double>, <1 x double> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X3_T]] [[TMP10]]
+//
+float64x1x3_t test_vld3_f64(float64_t const *a) {
+  return vld3_f64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly8x8x3_t @test_vld3_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X8X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X8X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X8X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY8X8X3_T]] [[TMP10]]
+//
+poly8x8x3_t test_vld3_p8(poly8_t const *a) {
+  return vld3_p8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly16x4x3_t @test_vld3_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X4X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X4X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X4X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY16X4X3_T]] [[TMP10]]
+//
+poly16x4x3_t test_vld3_p16(poly16_t const *a) {
+  return vld3_p16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint8x16x4_t @test_vld4q_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X16X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X16X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X16X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT8X16X4_T]] [[TMP10]]
+//
+uint8x16x4_t test_vld4q_u8(uint8_t const *a) {
+  return vld4q_u8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint16x8x4_t @test_vld4q_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X8X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X8X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X8X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT16X8X4_T]] [[TMP10]]
+//
+uint16x8x4_t test_vld4q_u16(uint16_t const *a) {
+  return vld4q_u16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint32x4x4_t @test_vld4q_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X4X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X4X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X4X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT32X4X4_T]] [[TMP10]]
+//
+uint32x4x4_t test_vld4q_u32(uint32_t const *a) {
+  return vld4q_u32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint64x2x4_t @test_vld4q_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X2X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X2X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X2X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT64X2X4_T]] [[TMP10]]
+//
+uint64x2x4_t test_vld4q_u64(uint64_t const *a) {
+  return vld4q_u64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int8x16x4_t @test_vld4q_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X16X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X16X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X16X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT8X16X4_T]] [[TMP10]]
+//
+int8x16x4_t test_vld4q_s8(int8_t const *a) {
+  return vld4q_s8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int16x8x4_t @test_vld4q_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X8X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X8X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X8X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT16X8X4_T]] [[TMP10]]
+//
+int16x8x4_t test_vld4q_s16(int16_t const *a) {
+  return vld4q_s16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int32x4x4_t @test_vld4q_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X4X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X4X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X4X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT32X4X4_T]] [[TMP10]]
+//
+int32x4x4_t test_vld4q_s32(int32_t const *a) {
+  return vld4q_s32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int64x2x4_t @test_vld4q_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X2X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X2X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X2X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT64X2X4_T]] [[TMP10]]
+//
+int64x2x4_t test_vld4q_s64(int64_t const *a) {
+  return vld4q_s64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float16x8x4_t @test_vld4q_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X8X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X8X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x half>, <8 x half>, <8 x half>, <8 x half> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X8X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT16X8X4_T]] [[TMP10]]
+//
+float16x8x4_t test_vld4q_f16(float16_t const *a) {
+  return vld4q_f16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float32x4x4_t @test_vld4q_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X4X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X4X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X4X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT32X4X4_T]] [[TMP10]]
+//
+float32x4x4_t test_vld4q_f32(float32_t const *a) {
+  return vld4q_f32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float64x2x4_t @test_vld4q_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x double>, <2 x double>, <2 x double>, <2 x double> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X4_T]] [[TMP10]]
+//
+float64x2x4_t test_vld4q_f64(float64_t const *a) {
+  return vld4q_f64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly8x16x4_t @test_vld4q_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X16X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X16X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X16X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY8X16X4_T]] [[TMP10]]
+//
+poly8x16x4_t test_vld4q_p8(poly8_t const *a) {
+  return vld4q_p8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly16x8x4_t @test_vld4q_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X8X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X8X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X8X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY16X8X4_T]] [[TMP10]]
+//
+poly16x8x4_t test_vld4q_p16(poly16_t const *a) {
+  return vld4q_p16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint8x8x4_t @test_vld4_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X8X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X8X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X8X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT8X8X4_T]] [[TMP10]]
+//
+uint8x8x4_t test_vld4_u8(uint8_t const *a) {
+  return vld4_u8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint16x4x4_t @test_vld4_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X4X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X4X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X4X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT16X4X4_T]] [[TMP10]]
+//
+uint16x4x4_t test_vld4_u16(uint16_t const *a) {
+  return vld4_u16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint32x2x4_t @test_vld4_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X2X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X2X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X2X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT32X2X4_T]] [[TMP10]]
+//
+uint32x2x4_t test_vld4_u32(uint32_t const *a) {
+  return vld4_u32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.uint64x1x4_t @test_vld4_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X1X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X1X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X1X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_UINT64X1X4_T]] [[TMP10]]
+//
+uint64x1x4_t test_vld4_u64(uint64_t const *a) {
+  return vld4_u64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int8x8x4_t @test_vld4_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X8X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X8X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X8X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT8X8X4_T]] [[TMP10]]
+//
+int8x8x4_t test_vld4_s8(int8_t const *a) {
+  return vld4_s8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int16x4x4_t @test_vld4_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X4X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X4X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X4X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT16X4X4_T]] [[TMP10]]
+//
+int16x4x4_t test_vld4_s16(int16_t const *a) {
+  return vld4_s16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int32x2x4_t @test_vld4_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X2X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X2X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X2X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT32X2X4_T]] [[TMP10]]
+//
+int32x2x4_t test_vld4_s32(int32_t const *a) {
+  return vld4_s32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.int64x1x4_t @test_vld4_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X1X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X1X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X1X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_INT64X1X4_T]] [[TMP10]]
+//
+int64x1x4_t test_vld4_s64(int64_t const *a) {
+  return vld4_s64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float16x4x4_t @test_vld4_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X4X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X4X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x half>, <4 x half>, <4 x half>, <4 x half> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X4X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT16X4X4_T]] [[TMP10]]
+//
+float16x4x4_t test_vld4_f16(float16_t const *a) {
+  return vld4_f16(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float32x2x4_t @test_vld4_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X2X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X2X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <2 x float>, <2 x float>, <2 x float>, <2 x float> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X2X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT32X2X4_T]] [[TMP10]]
+//
+float32x2x4_t test_vld4_f32(float32_t const *a) {
+  return vld4_f32(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float64x1x4_t @test_vld4_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x double>, <1 x double>, <1 x double>, <1 x double> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X4_T]] [[TMP10]]
+//
+float64x1x4_t test_vld4_f64(float64_t const *a) {
+  return vld4_f64(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly8x8x4_t @test_vld4_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X8X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X8X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X8X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY8X8X4_T]] [[TMP10]]
+//
+poly8x8x4_t test_vld4_p8(poly8_t const *a) {
+  return vld4_p8(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly16x4x4_t @test_vld4_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X4X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X4X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X4X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY16X4X4_T]] [[TMP10]]
+//
+poly16x4x4_t test_vld4_p16(poly16_t const *a) {
+  return vld4_p16(a);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP2]], align 1
+// CHECK-NEXT:    store <16 x i8> [[B]], ptr [[A]], align 1
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_u8(uint8_t *a, uint8x16_t b) {
+  vst1q_u8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP4]], align 2
+// CHECK-NEXT:    store <8 x i16> [[TMP1]], ptr [[A]], align 2
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_u16(uint16_t *a, uint16x8_t b) {
+  vst1q_u16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP4]], align 4
+// CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_u32(uint32_t *a, uint32x4_t b) {
+  vst1q_u32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]], <2 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP4]], align 8
+// CHECK-NEXT:    store <2 x i64> [[TMP1]], ptr [[A]], align 8
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_u64(uint64_t *a, uint64x2_t b) {
+  vst1q_u64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP2]], align 1
+// CHECK-NEXT:    store <16 x i8> [[B]], ptr [[A]], align 1
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_s8(int8_t *a, int8x16_t b) {
+  vst1q_s8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP4]], align 2
+// CHECK-NEXT:    store <8 x i16> [[TMP1]], ptr [[A]], align 2
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_s16(int16_t *a, int16x8_t b) {
+  vst1q_s16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP4]], align 4
+// CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_s32(int32_t *a, int32x4_t b) {
+  vst1q_s32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]], <2 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP4]], align 8
+// CHECK-NEXT:    store <2 x i64> [[TMP1]], ptr [[A]], align 8
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_s64(int64_t *a, int64x2_t b) {
+  vst1q_s64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half> [[B]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP4]], align 2
+// CHECK-NEXT:    store <8 x half> [[TMP1]], ptr [[A]], align 2
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_f16(float16_t *a, float16x8_t b) {
+  vst1q_f16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]], <4 x float> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[B]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP4]], align 4
+// CHECK-NEXT:    store <4 x float> [[TMP1]], ptr [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_f32(float32_t *a, float32x4_t b) {
+  vst1q_f32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]], <2 x double> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[B]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP4]], align 8
+// CHECK-NEXT:    store <2 x double> [[TMP1]], ptr [[A]], align 8
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_f64(float64_t *a, float64x2_t b) {
+  vst1q_f64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP2]], align 1
+// CHECK-NEXT:    store <16 x i8> [[B]], ptr [[A]], align 1
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_p8(poly8_t *a, poly8x16_t b) {
+  vst1q_p8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP4]], align 2
+// CHECK-NEXT:    store <8 x i16> [[TMP1]], ptr [[A]], align 2
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_p16(poly16_t *a, poly16x8_t b) {
+  vst1q_p16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr [[TMP2]], align 1
+// CHECK-NEXT:    store <8 x i8> [[B]], ptr [[A]], align 1
+// CHECK-NEXT:    ret void
+//
+void test_vst1_u8(uint8_t *a, uint8x8_t b) {
+  vst1_u8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr [[TMP4]], align 2
+// CHECK-NEXT:    store <4 x i16> [[TMP1]], ptr [[A]], align 2
+// CHECK-NEXT:    ret void
+//
+void test_vst1_u16(uint16_t *a, uint16x4_t b) {
+  vst1_u16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP4]], align 4
+// CHECK-NEXT:    store <2 x i32> [[TMP1]], ptr [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+void test_vst1_u32(uint32_t *a, uint32x2_t b) {
+  vst1_u32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]], <1 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr [[TMP4]], align 8
+// CHECK-NEXT:    store <1 x i64> [[TMP1]], ptr [[A]], align 8
+// CHECK-NEXT:    ret void
+//
+void test_vst1_u64(uint64_t *a, uint64x1_t b) {
+  vst1_u64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr [[TMP2]], align 1
+// CHECK-NEXT:    store <8 x i8> [[B]], ptr [[A]], align 1
+// CHECK-NEXT:    ret void
+//
+void test_vst1_s8(int8_t *a, int8x8_t b) {
+  vst1_s8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr [[TMP4]], align 2
+// CHECK-NEXT:    store <4 x i16> [[TMP1]], ptr [[A]], align 2
+// CHECK-NEXT:    ret void
+//
+void test_vst1_s16(int16_t *a, int16x4_t b) {
+  vst1_s16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP4]], align 4
+// CHECK-NEXT:    store <2 x i32> [[TMP1]], ptr [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+void test_vst1_s32(int32_t *a, int32x2_t b) {
+  vst1_s32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]], <1 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr [[TMP4]], align 8
+// CHECK-NEXT:    store <1 x i64> [[TMP1]], ptr [[A]], align 8
+// CHECK-NEXT:    ret void
+//
+void test_vst1_s64(int64_t *a, int64x1_t b) {
+  vst1_s64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x half> [[B]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr [[TMP4]], align 2
+// CHECK-NEXT:    store <4 x half> [[TMP1]], ptr [[A]], align 2
+// CHECK-NEXT:    ret void
+//
+void test_vst1_f16(float16_t *a, float16x4_t b) {
+  vst1_f16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]], <2 x float> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[B]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP4]], align 4
+// CHECK-NEXT:    store <2 x float> [[TMP1]], ptr [[A]], align 4
+// CHECK-NEXT:    ret void
+//
+void test_vst1_f32(float32_t *a, float32x2_t b) {
+  vst1_f32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x double> [[B]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr [[TMP4]], align 8
+// CHECK-NEXT:    store <1 x double> [[TMP1]], ptr [[A]], align 8
+// CHECK-NEXT:    ret void
+//
+void test_vst1_f64(float64_t *a, float64x1_t b) {
+  vst1_f64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr [[TMP2]], align 1
+// CHECK-NEXT:    store <8 x i8> [[B]], ptr [[A]], align 1
+// CHECK-NEXT:    ret void
+//
+void test_vst1_p8(poly8_t *a, poly8x8_t b) {
+  vst1_p8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
+// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr [[TMP4]], align 2
+// CHECK-NEXT:    store <4 x i16> [[TMP1]], ptr [[A]], align 2
+// CHECK-NEXT:    ret void
+//
+void test_vst1_p16(poly16_t *a, poly16x4_t b) {
+  vst1_p16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X16X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X16X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP14]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst2q_u8(uint8_t *a, uint8x16x2_t b) {
+  vst2q_u8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X8X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X8X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP14]], <8 x i16> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst2q_u16(uint16_t *a, uint16x8x2_t b) {
+  vst2q_u16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X4X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X4X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[TMP14]], <4 x i32> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst2q_u32(uint32_t *a, uint32x4x2_t b) {
+  vst2q_u32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X2X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X2X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[TMP14]], <2 x i64> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Verifies MSan handling of vst2q_u64: the shadow of the destination [[A]]
+// (address xor 0xB00000000000) gets a zero <4 x i64> store covering the 32
+// bytes written, before the llvm.aarch64.neon.st2 call is emitted.
+void test_vst2q_u64(uint64_t *a, uint64x2x2_t b) {
+  vst2q_u64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X16X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X16X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP14]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Verifies MSan handling of vst2q_s8: a zero <32 x i8> shadow store covers
+// the 32 destination bytes at [[A]] before the st2 intrinsic. Note the i8
+// case needs no bitcasts, unlike the wider-element variants.
+void test_vst2q_s8(int8_t *a, int8x16x2_t b) {
+  vst2q_s8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X8X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X8X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP14]], <8 x i16> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Verifies MSan handling of vst2q_s16: a zero <16 x i16> shadow store covers
+// the 32 destination bytes at [[A]] before the st2 intrinsic.
+void test_vst2q_s16(int16_t *a, int16x8x2_t b) {
+  vst2q_s16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X4X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X4X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[TMP14]], <4 x i32> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Verifies MSan handling of vst2q_s32: a zero <8 x i32> shadow store covers
+// the 32 destination bytes at [[A]] before the st2 intrinsic. Same IR shape
+// as the u32 variant above, since signedness does not affect the lowering.
+void test_vst2q_s32(int32_t *a, int32x4x2_t b) {
+  vst2q_s32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X2X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X2X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[TMP14]], <2 x i64> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Verifies MSan handling of vst2q_s64: a zero <4 x i64> shadow store covers
+// the 32 destination bytes at [[A]] before the st2 intrinsic. Same IR shape
+// as the u64 variant above, since signedness does not affect the lowering.
+void test_vst2q_s64(int64_t *a, int64x2x2_t b) {
+  vst2q_s64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x half>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <8 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x half>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x half>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x half>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x half> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x half>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x half>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8f16.p0(<8 x half> [[TMP14]], <8 x half> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Verifies MSan handling of vst2q_f16: the shadow uses the integer type of
+// matching width (<16 x i16> zero store for <8 x half> x 2 data) before the
+// st2.v8f16 intrinsic.
+void test_vst2q_f16(float16_t *a, float16x8x2_t b) {
+  vst2q_f16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x float>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X4X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X4X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <4 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x float>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x float> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x float>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x float> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x float>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x float>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float> [[TMP14]], <4 x float> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Verifies MSan handling of vst2q_f32: the shadow uses the integer type of
+// matching width (<8 x i32> zero store for <4 x float> x 2 data) before the
+// st2.v4f32 intrinsic.
+void test_vst2q_f32(float32_t *a, float32x4x2_t b) {
+  vst2q_f32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2f64.p0(<2 x double> [[TMP14]], <2 x double> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Verifies MSan handling of vst2q_f64: the shadow uses the integer type of
+// matching width (<4 x i64> zero store for <2 x double> x 2 data) before the
+// st2.v2f64 intrinsic.
+void test_vst2q_f64(float64_t *a, float64x2x2_t b) {
+  vst2q_f64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X16X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X16X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP14]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Verifies MSan handling of vst2q_p8: same <32 x i8> zero shadow store and
+// st2.v16i8 lowering as the s8/u8 variants — poly8 shares the i8 element type.
+void test_vst2q_p8(poly8_t *a, poly8x16x2_t b) {
+  vst2q_p8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2q_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X8X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X8X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP14]], <8 x i16> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Verifies MSan handling of vst2q_p16: same <16 x i16> zero shadow store and
+// st2.v8i16 lowering as the s16/u16 variants — poly16 shares the i16 element type.
+void test_vst2q_p16(poly16_t *a, poly16x8x2_t b) {
+  vst2q_p16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X8X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X8X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP14]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Verifies MSan handling of the 64-bit (non-q) vst2_u8: only 16 destination
+// bytes at [[A]] are covered (zero <16 x i8> shadow store) before st2.v8i8.
+void test_vst2_u8(uint8_t *a, uint8x8x2_t b) {
+  vst2_u8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X4X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X4X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP14]], <4 x i16> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Stores a uint16x4x2_t via vst2_u16. Per the CHECK lines above, MSan is
+// expected to store <8 x i16> zeroinitializer to the shadow of the 16 bytes at
+// `a` before calling llvm.aarch64.neon.st2.v4i16.p0.
+void test_vst2_u16(uint16_t *a, uint16x4x2_t b) {
+  vst2_u16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X2X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X2X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[TMP14]], <2 x i32> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Stores a uint32x2x2_t via vst2_u32. The CHECK lines above expect MSan to
+// store <4 x i32> zeroinitializer to the shadow of `a` before the
+// llvm.aarch64.neon.st2.v2i32.p0 call.
+void test_vst2_u32(uint32_t *a, uint32x2x2_t b) {
+  vst2_u32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X1X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X1X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[TMP14]], <1 x i64> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Stores a uint64x1x2_t via vst2_u64. The CHECK lines above expect MSan to
+// store <2 x i64> zeroinitializer to the shadow of `a` before the
+// llvm.aarch64.neon.st2.v1i64.p0 call.
+void test_vst2_u64(uint64_t *a, uint64x1x2_t b) {
+  vst2_u64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X8X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X8X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP14]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Signed-int8 variant: identical instrumentation pattern to test_vst2_u8 —
+// the CHECK lines expect a <16 x i8> zeroinitializer shadow store to `a`
+// before llvm.aarch64.neon.st2.v8i8.p0.
+void test_vst2_s8(int8_t *a, int8x8x2_t b) {
+  vst2_s8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X4X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X4X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP14]], <4 x i16> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Signed-int16 variant: the CHECK lines expect a <8 x i16> zeroinitializer
+// shadow store to `a` before llvm.aarch64.neon.st2.v4i16.p0.
+void test_vst2_s16(int16_t *a, int16x4x2_t b) {
+  vst2_s16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X2X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X2X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[TMP14]], <2 x i32> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Signed-int32 variant: the CHECK lines expect a <4 x i32> zeroinitializer
+// shadow store to `a` before llvm.aarch64.neon.st2.v2i32.p0.
+void test_vst2_s32(int32_t *a, int32x2x2_t b) {
+  vst2_s32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X1X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X1X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[TMP14]], <1 x i64> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Signed-int64 variant: the CHECK lines expect a <2 x i64> zeroinitializer
+// shadow store to `a` before llvm.aarch64.neon.st2.v1i64.p0.
+void test_vst2_s64(int64_t *a, int64x1x2_t b) {
+  vst2_s64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x half>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <4 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x half>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x half>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x half> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x half>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x half> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x half>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x half>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4f16.p0(<4 x half> [[TMP14]], <4 x half> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Half-float variant: shadow is integer-typed, so the CHECK lines expect a
+// <8 x i16> zeroinitializer shadow store to `a` before
+// llvm.aarch64.neon.st2.v4f16.p0.
+void test_vst2_f16(float16_t *a, float16x4x2_t b) {
+  vst2_f16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x float>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X2X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X2X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <2 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x float>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x float>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x float> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x float>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x float>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x float> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x float>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x float>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2f32.p0(<2 x float> [[TMP14]], <2 x float> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Single-float variant: the CHECK lines expect a <4 x i32> zeroinitializer
+// shadow store to `a` (integer shadow for the float payload) before
+// llvm.aarch64.neon.st2.v2f32.p0.
+void test_vst2_f32(float32_t *a, float32x2x2_t b) {
+  vst2_f32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v1f64.p0(<1 x double> [[TMP14]], <1 x double> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Double variant: the CHECK lines expect a <2 x i64> zeroinitializer shadow
+// store to `a` before llvm.aarch64.neon.st2.v1f64.p0.
+void test_vst2_f64(float64_t *a, float64x1x2_t b) {
+  vst2_f64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X8X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X8X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP14]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// Poly8 variant: same lowering as the u8/s8 cases — the CHECK lines expect a
+// <16 x i8> zeroinitializer shadow store to `a` before
+// llvm.aarch64.neon.st2.v8i8.p0.
+void test_vst2_p8(poly8_t *a, poly8x8x2_t b) {
+  vst2_p8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst2_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X4X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X4X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
+// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
+// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
+// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP18]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP14]], <4 x i16> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan instrumentation of vst2_p16 (2 x <4 x i16> interleaved store, 16 bytes).
+// The CHECK lines above verify a zero (clean) <8 x i16> shadow is stored for
+// the destination before the @llvm.aarch64.neon.st2.v4i16 call.
+void test_vst2_p16(poly16_t *a, poly16x4x2_t b) {
+  vst2_p16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X16X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X16X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
+// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+// CHECK-NEXT:    store <48 x i8> zeroinitializer, ptr [[TMP15]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan instrumentation of vst3q_u8 (3 x <16 x i8> interleaved store, 48 bytes).
+// The CHECK lines above verify a zero (clean) <48 x i8> shadow is stored for
+// the destination before the @llvm.aarch64.neon.st3.v16i8 call.
+void test_vst3q_u8(uint8_t *a, uint8x16x3_t b) {
+  vst3q_u8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X8X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X8X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <24 x i16> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP16]], <8 x i16> [[TMP17]], <8 x i16> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan instrumentation of vst3q_u16 (3 x <8 x i16> interleaved store, 48 bytes).
+// The CHECK lines above verify a zero (clean) <24 x i16> shadow is stored for
+// the destination before the @llvm.aarch64.neon.st3.v8i16 call.
+void test_vst3q_u16(uint16_t *a, uint16x8x3_t b) {
+  vst3q_u16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X4X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X4X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i32> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x i32>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <12 x i32> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[TMP16]], <4 x i32> [[TMP17]], <4 x i32> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan instrumentation of vst3q_u32 (3 x <4 x i32> interleaved store, 48 bytes).
+// The CHECK lines above verify a zero (clean) <12 x i32> shadow is stored for
+// the destination before the @llvm.aarch64.neon.st3.v4i32 call.
+void test_vst3q_u32(uint32_t *a, uint32x4x3_t b) {
+  vst3q_u32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X2X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X2X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <6 x i64> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[TMP16]], <2 x i64> [[TMP17]], <2 x i64> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan instrumentation of vst3q_u64 (3 x <2 x i64> interleaved store, 48 bytes).
+// The CHECK lines above verify a zero (clean) <6 x i64> shadow is stored for
+// the destination before the @llvm.aarch64.neon.st3.v2i64 call.
+void test_vst3q_u64(uint64_t *a, uint64x2x3_t b) {
+  vst3q_u64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X16X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X16X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
+// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+// CHECK-NEXT:    store <48 x i8> zeroinitializer, ptr [[TMP15]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan instrumentation of vst3q_s8; same IR shape as the u8 variant (3 x
+// <16 x i8>, 48 bytes): a zero (clean) <48 x i8> shadow is stored for the
+// destination before the @llvm.aarch64.neon.st3.v16i8 call.
+void test_vst3q_s8(int8_t *a, int8x16x3_t b) {
+  vst3q_s8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X8X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X8X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <24 x i16> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP16]], <8 x i16> [[TMP17]], <8 x i16> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan instrumentation of vst3q_s16; same IR shape as the u16 variant (3 x
+// <8 x i16>, 48 bytes): a zero (clean) <24 x i16> shadow is stored for the
+// destination before the @llvm.aarch64.neon.st3.v8i16 call.
+void test_vst3q_s16(int16_t *a, int16x8x3_t b) {
+  vst3q_s16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X4X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X4X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i32> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x i32>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <12 x i32> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[TMP16]], <4 x i32> [[TMP17]], <4 x i32> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan instrumentation of vst3q_s32; same IR shape as the u32 variant (3 x
+// <4 x i32>, 48 bytes): a zero (clean) <12 x i32> shadow is stored for the
+// destination before the @llvm.aarch64.neon.st3.v4i32 call.
+void test_vst3q_s32(int32_t *a, int32x4x3_t b) {
+  vst3q_s32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X2X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X2X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <6 x i64> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[TMP16]], <2 x i64> [[TMP17]], <2 x i64> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan instrumentation of vst3q_s64; same IR shape as the u64 variant (3 x
+// <2 x i64>, 48 bytes): a zero (clean) <6 x i64> shadow is stored for the
+// destination before the @llvm.aarch64.neon.st3.v2i64 call.
+void test_vst3q_s64(int64_t *a, int64x2x3_t b) {
+  vst3q_s64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x half>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X8X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X8X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <8 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x half>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x half>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x half>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x half>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x half> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x half>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x half> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x half>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x half>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x half>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <24 x i16> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8f16.p0(<8 x half> [[TMP16]], <8 x half> [[TMP17]], <8 x half> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan instrumentation of vst3q_f16 (3 x <8 x half> interleaved store, 48
+// bytes). The CHECK lines above verify a zero (clean) <24 x i16> shadow (the
+// integer shadow type for half elements) is stored for the destination before
+// the @llvm.aarch64.neon.st3.v8f16 call.
+void test_vst3q_f16(float16_t *a, float16x8x3_t b) {
+  vst3q_f16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x float>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X4X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X4X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <4 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x float>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x float> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x float>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x float> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x float>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x float>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x float> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x float>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x float>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x float>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <12 x i32> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4f32.p0(<4 x float> [[TMP16]], <4 x float> [[TMP17]], <4 x float> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan check: the 48-byte destination shadow is zeroed (<12 x i32> store above)
+// before the call to @llvm.aarch64.neon.st3.v4f32.p0.
+void test_vst3q_f32(float32_t *a, float32x4x3_t b) {
+  vst3q_f32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x double> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x double>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <6 x i64> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2f64.p0(<2 x double> [[TMP16]], <2 x double> [[TMP17]], <2 x double> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan check: the 48-byte destination shadow is zeroed (<6 x i64> store above)
+// before the call to @llvm.aarch64.neon.st3.v2f64.p0.
+void test_vst3q_f64(float64_t *a, float64x2x3_t b) {
+  vst3q_f64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X16X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X16X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
+// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+// CHECK-NEXT:    store <48 x i8> zeroinitializer, ptr [[TMP15]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan check: the 48-byte destination shadow is zeroed (<48 x i8> store above)
+// before the call to @llvm.aarch64.neon.st3.v16i8.p0.
+void test_vst3q_p8(poly8_t *a, poly8x16x3_t b) {
+  vst3q_p8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3q_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X8X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X8X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <24 x i16> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP16]], <8 x i16> [[TMP17]], <8 x i16> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan check: the 48-byte destination shadow is zeroed (<24 x i16> store above)
+// before the call to @llvm.aarch64.neon.st3.v8i16.p0.
+void test_vst3q_p16(poly16_t *a, poly16x8x3_t b) {
+  vst3q_p16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X8X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X8X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
+// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+// CHECK-NEXT:    store <24 x i8> zeroinitializer, ptr [[TMP15]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan check: the 24-byte destination shadow is zeroed (<24 x i8> store above)
+// before the call to @llvm.aarch64.neon.st3.v8i8.p0.
+void test_vst3_u8(uint8_t *a, uint8x8x3_t b) {
+  vst3_u8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X4X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X4X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <12 x i16> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP16]], <4 x i16> [[TMP17]], <4 x i16> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan check: the 24-byte destination shadow is zeroed (<12 x i16> store above)
+// before the call to @llvm.aarch64.neon.st3.v4i16.p0.
+void test_vst3_u16(uint16_t *a, uint16x4x3_t b) {
+  vst3_u16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X2X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X2X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i32> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x i32>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <6 x i32> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[TMP16]], <2 x i32> [[TMP17]], <2 x i32> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan check: the 24-byte destination shadow is zeroed (<6 x i32> store above)
+// before the call to @llvm.aarch64.neon.st3.v2i32.p0.
+void test_vst3_u32(uint32_t *a, uint32x2x3_t b) {
+  vst3_u32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X1X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X1X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <3 x i64> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[TMP16]], <1 x i64> [[TMP17]], <1 x i64> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan check: the 24-byte destination shadow is zeroed (<3 x i64> store above)
+// before the call to @llvm.aarch64.neon.st3.v1i64.p0.
+void test_vst3_u64(uint64_t *a, uint64x1x3_t b) {
+  vst3_u64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X8X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X8X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
+// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+// CHECK-NEXT:    store <24 x i8> zeroinitializer, ptr [[TMP15]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan check: the 24-byte destination shadow is zeroed (<24 x i8> store above)
+// before the call to @llvm.aarch64.neon.st3.v8i8.p0.
+void test_vst3_s8(int8_t *a, int8x8x3_t b) {
+  vst3_s8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X4X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X4X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <12 x i16> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP16]], <4 x i16> [[TMP17]], <4 x i16> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+// MSan check: the 24-byte destination shadow is zeroed (<12 x i16> store above)
+// before the call to @llvm.aarch64.neon.st3.v4i16.p0.
+void test_vst3_s16(int16_t *a, int16x4x3_t b) {
+  vst3_s16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X2X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X2X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i32> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x i32>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <6 x i32> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[TMP16]], <2 x i32> [[TMP17]], <2 x i32> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst3_s32(int32_t *a, int32x2x3_t b) {
+  vst3_s32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X1X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X1X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <3 x i64> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[TMP16]], <1 x i64> [[TMP17]], <1 x i64> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst3_s64(int64_t *a, int64x1x3_t b) {
+  vst3_s64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x half>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X4X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X4X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <4 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x half>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x half>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x half> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x half>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x half>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x half> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x half>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x half> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x half>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x half>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x half>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <12 x i16> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4f16.p0(<4 x half> [[TMP16]], <4 x half> [[TMP17]], <4 x half> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst3_f16(float16_t *a, float16x4x3_t b) {
+  vst3_f16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x float>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X2X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X2X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <2 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x float>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x float>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x float> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x float>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x float>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x float> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x float>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x float>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x float> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x float>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x float>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x float>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <6 x i32> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2f32.p0(<2 x float> [[TMP16]], <2 x float> [[TMP17]], <2 x float> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst3_f32(float32_t *a, float32x2x3_t b) {
+  vst3_f32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x double> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x double>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <3 x i64> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v1f64.p0(<1 x double> [[TMP16]], <1 x double> [[TMP17]], <1 x double> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst3_f64(float64_t *a, float64x1x3_t b) {
+  vst3_f64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X8X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X8X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
+// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+// CHECK-NEXT:    store <24 x i8> zeroinitializer, ptr [[TMP15]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst3_p8(poly8_t *a, poly8x8x3_t b) {
+  vst3_p8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst3_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X4X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X4X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
+// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
+// CHECK-NEXT:    store <12 x i16> zeroinitializer, ptr [[TMP21]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP16]], <4 x i16> [[TMP17]], <4 x i16> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst3_p16(poly16_t *a, poly16x4x3_t b) {
+  vst3_p16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X16X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X16X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP13:%.*]] = load <16 x i8>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+// CHECK-NEXT:    store <64 x i8> zeroinitializer, ptr [[TMP16]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP13]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_u8(uint8_t *a, uint8x16x4_t b) {
+  vst4q_u8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X8X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X8X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <8 x i16>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i16> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <8 x i16>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <32 x i16> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP18]], <8 x i16> [[TMP19]], <8 x i16> [[TMP20]], <8 x i16> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_u16(uint16_t *a, uint16x8x4_t b) {
+  vst4q_u16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X4X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X4X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i32> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x i32>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x i32> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x i32>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <4 x i32>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[TMP18]], <4 x i32> [[TMP19]], <4 x i32> [[TMP20]], <4 x i32> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_u32(uint32_t *a, uint32x4x4_t b) {
+  // MSan must recognize the interleaved NEON store: the CHECK lines above
+  // verify the shadow of the 64 bytes written through `a` is zeroed
+  // (`store <16 x i32> zeroinitializer`) before llvm.aarch64.neon.st4.v4i32.p0.
+  vst4q_u32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X2X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X2X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x i64>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x i64> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <2 x i64>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <8 x i64> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[TMP18]], <2 x i64> [[TMP19]], <2 x i64> [[TMP20]], <2 x i64> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_u64(uint64_t *a, uint64x2x4_t b) {
+  // MSan must recognize the interleaved NEON store: the CHECK lines above
+  // verify the shadow of the 64 bytes written through `a` is zeroed
+  // (`store <8 x i64> zeroinitializer`) before llvm.aarch64.neon.st4.v2i64.p0.
+  vst4q_u64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X16X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X16X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP13:%.*]] = load <16 x i8>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+// CHECK-NEXT:    store <64 x i8> zeroinitializer, ptr [[TMP16]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP13]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_s8(int8_t *a, int8x16x4_t b) {
+  // MSan must recognize the interleaved NEON store: the CHECK lines above
+  // verify the shadow of the 64 bytes written through `a` is zeroed
+  // (`store <64 x i8> zeroinitializer`) before llvm.aarch64.neon.st4.v16i8.p0.
+  vst4q_s8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X8X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X8X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <8 x i16>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i16> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <8 x i16>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <32 x i16> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP18]], <8 x i16> [[TMP19]], <8 x i16> [[TMP20]], <8 x i16> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_s16(int16_t *a, int16x8x4_t b) {
+  // MSan must recognize the interleaved NEON store: the CHECK lines above
+  // verify the shadow of the 64 bytes written through `a` is zeroed
+  // (`store <32 x i16> zeroinitializer`) before llvm.aarch64.neon.st4.v8i16.p0.
+  vst4q_s16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X4X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X4X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i32> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x i32>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x i32> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x i32>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <4 x i32>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[TMP18]], <4 x i32> [[TMP19]], <4 x i32> [[TMP20]], <4 x i32> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_s32(int32_t *a, int32x4x4_t b) {
+  // MSan must recognize the interleaved NEON store: the CHECK lines above
+  // verify the shadow of the 64 bytes written through `a` is zeroed
+  // (`store <16 x i32> zeroinitializer`) before llvm.aarch64.neon.st4.v4i32.p0.
+  vst4q_s32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X2X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X2X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x i64>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x i64> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <2 x i64>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <8 x i64> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[TMP18]], <2 x i64> [[TMP19]], <2 x i64> [[TMP20]], <2 x i64> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_s64(int64_t *a, int64x2x4_t b) {
+  // MSan must recognize the interleaved NEON store: the CHECK lines above
+  // verify the shadow of the 64 bytes written through `a` is zeroed
+  // (`store <8 x i64> zeroinitializer`) before llvm.aarch64.neon.st4.v2i64.p0.
+  vst4q_s64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x half>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X8X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X8X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <8 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x half>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x half>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x half> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x half>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x half> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <8 x half>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x half> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x half>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x half>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x half>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <8 x half>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <32 x i16> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8f16.p0(<8 x half> [[TMP18]], <8 x half> [[TMP19]], <8 x half> [[TMP20]], <8 x half> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_f16(float16_t *a, float16x8x4_t b) {
+  // MSan must recognize the interleaved NEON store: the CHECK lines above
+  // verify the shadow of the 64 bytes written through `a` is zeroed
+  // (`store <32 x i16> zeroinitializer`) before llvm.aarch64.neon.st4.v8f16.p0.
+  vst4q_f16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x float>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X4X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X4X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <4 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x float> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x float> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x float>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x float> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x float>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x float> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x float>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x float>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x float>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <4 x float>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4f32.p0(<4 x float> [[TMP18]], <4 x float> [[TMP19]], <4 x float> [[TMP20]], <4 x float> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_f32(float32_t *a, float32x4x4_t b) {
+  // MSan must recognize the interleaved NEON store: the CHECK lines above
+  // verify the shadow of the 64 bytes written through `a` is zeroed
+  // (`store <16 x i32> zeroinitializer`) before llvm.aarch64.neon.st4.v4f32.p0.
+  vst4q_f32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x double> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x double>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x double> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x double>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <2 x double>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <8 x i64> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2f64.p0(<2 x double> [[TMP18]], <2 x double> [[TMP19]], <2 x double> [[TMP20]], <2 x double> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_f64(float64_t *a, float64x2x4_t b) {
+  // MSan must recognize the interleaved NEON store: the CHECK lines above
+  // verify the shadow of the 64 bytes written through `a` is zeroed
+  // (`store <8 x i64> zeroinitializer`) before llvm.aarch64.neon.st4.v2f64.p0.
+  vst4q_f64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X16X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X16X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP13:%.*]] = load <16 x i8>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+// CHECK-NEXT:    store <64 x i8> zeroinitializer, ptr [[TMP16]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP13]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_p8(poly8_t *a, poly8x16x4_t b) {
+  vst4q_p8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4q_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X8X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X8X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <8 x i16>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i16> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <8 x i16>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <32 x i16> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP18]], <8 x i16> [[TMP19]], <8 x i16> [[TMP20]], <8 x i16> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4q_p16(poly16_t *a, poly16x8x4_t b) {
+  vst4q_p16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4_u8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X8X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X8X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i8>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP16]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], <8 x i8> [[TMP13]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4_u8(uint8_t *a, uint8x8x4_t b) {
+  vst4_u8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4_u16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X4X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X4X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x i16>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x i16> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <4 x i16>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP18]], <4 x i16> [[TMP19]], <4 x i16> [[TMP20]], <4 x i16> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4_u16(uint16_t *a, uint16x4x4_t b) {
+  vst4_u16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4_u32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X2X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X2X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i32> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x i32>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x i32> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x i32>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <2 x i32>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[TMP18]], <2 x i32> [[TMP19]], <2 x i32> [[TMP20]], <2 x i32> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4_u32(uint32_t *a, uint32x2x4_t b) {
+  vst4_u32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4_u64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X1X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X1X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <1 x i64>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <1 x i64> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <1 x i64>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[TMP18]], <1 x i64> [[TMP19]], <1 x i64> [[TMP20]], <1 x i64> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4_u64(uint64_t *a, uint64x1x4_t b) {
+  vst4_u64(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4_s8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X8X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X8X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i8>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP16]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], <8 x i8> [[TMP13]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4_s8(int8_t *a, int8x8x4_t b) {
+  vst4_s8(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4_s16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X4X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X4X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x i16>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x i16> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <4 x i16>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP18]], <4 x i16> [[TMP19]], <4 x i16> [[TMP20]], <4 x i16> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4_s16(int16_t *a, int16x4x4_t b) {
+  vst4_s16(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4_s32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X2X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X2X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i32> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x i32>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x i32> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x i32>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <2 x i32>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[TMP18]], <2 x i32> [[TMP19]], <2 x i32> [[TMP20]], <2 x i32> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst4_s32(int32_t *a, int32x2x4_t b) {
+  vst4_s32(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst4_s64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X1X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X1X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <1 x i64>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <1 x i64> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <1 x i64>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[TMP18]], <1 x i64> [[TMP19]], <1 x i64> [[TMP20]], <1 x i64> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
// vst4_s64: 4-element structure store of four <1 x i64> vectors (32 bytes).
// The CHECK lines above verify MSan stores a zero <4 x i64> shadow for the
// destination before the @llvm.aarch64.neon.st4.v1i64.p0 call.
void test_vst4_s64(int64_t *a, int64x1x4_t b) {
  vst4_s64(a, b);
}
+
+// CHECK-LABEL: define dso_local void @test_vst4_f16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x half>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X4X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X4X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <4 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x half>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x half> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x half>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x half> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x half>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x half> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x half>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x half> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x half>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x half>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x half>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <4 x half>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4f16.p0(<4 x half> [[TMP18]], <4 x half> [[TMP19]], <4 x half> [[TMP20]], <4 x half> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
// vst4_f16: 4-element structure store of four <4 x half> vectors (32 bytes).
// The CHECK lines above verify MSan stores a zero <16 x i16> shadow (half
// shadow is tracked as i16) before the @llvm.aarch64.neon.st4.v4f16.p0 call.
void test_vst4_f16(float16_t *a, float16x4x4_t b) {
  vst4_f16(a, b);
}
+
+// CHECK-LABEL: define dso_local void @test_vst4_f32(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x float>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X2X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X2X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <2 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x float>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x float> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x float>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x float> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x float>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x float> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x float>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x float> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x float>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x float>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x float>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <2 x float>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2f32.p0(<2 x float> [[TMP18]], <2 x float> [[TMP19]], <2 x float> [[TMP20]], <2 x float> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
// vst4_f32: 4-element structure store of four <2 x float> vectors (32 bytes).
// The CHECK lines above verify MSan stores a zero <8 x i32> shadow (float
// shadow is tracked as i32) before the @llvm.aarch64.neon.st4.v2f32.p0 call.
void test_vst4_f32(float32_t *a, float32x2x4_t b) {
  vst4_f32(a, b);
}
+
+// CHECK-LABEL: define dso_local void @test_vst4_f64(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x double> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <1 x double>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <1 x double> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x double>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <1 x double>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v1f64.p0(<1 x double> [[TMP18]], <1 x double> [[TMP19]], <1 x double> [[TMP20]], <1 x double> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
// vst4_f64: 4-element structure store of four <1 x double> vectors (32 bytes).
// The CHECK lines above verify MSan stores a zero <4 x i64> shadow (double
// shadow is tracked as i64) before the @llvm.aarch64.neon.st4.v1f64.p0 call.
void test_vst4_f64(float64_t *a, float64x1x4_t b) {
  vst4_f64(a, b);
}
+
+// CHECK-LABEL: define dso_local void @test_vst4_p8(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X8X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X8X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i8>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP16]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], <8 x i8> [[TMP13]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
// vst4_p8: 4-element structure store of four <8 x i8> vectors (32 bytes).
// No bitcasts are needed for i8 lanes, so the IR is shorter than the other
// vst4 tests; the CHECK lines verify MSan stores a zero <32 x i8> shadow
// before the @llvm.aarch64.neon.st4.v8i8.p0 call.
void test_vst4_p8(poly8_t *a, poly8x8x4_t b) {
  vst4_p8(a, b);
}
+
+// CHECK-LABEL: define dso_local void @test_vst4_p16(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X4X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X4X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x i16>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x i16> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <4 x i16>
+// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
+// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
+// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
+// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP24]], align 1
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP18]], <4 x i16> [[TMP19]], <4 x i16> [[TMP20]], <4 x i16> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
// vst4_p16: 4-element structure store of four <4 x i16> vectors (32 bytes).
// The CHECK lines above verify MSan stores a zero <16 x i16> shadow for the
// destination before the @llvm.aarch64.neon.st4.v4i16.p0 call.
void test_vst4_p16(poly16_t *a, poly16x4x4_t b) {
  vst4_p16(a, b);
}
+
+// CHECK-LABEL: define dso_local %struct.float64x2x2_t @test_vld1q_f64_x2(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x double>, <2 x double> } [[VLD1XN]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X2_T]] [[TMP10]]
+//
// vld1q_f64_x2: multi-vector load of two <2 x double> values via
// @llvm.aarch64.neon.ld1x2.v2f64.p0. The CHECK lines above verify MSan
// zero-initializes the shadow of the result temporary before the loaded
// aggregate is stored, and writes clean shadow to __msan_retval_tls.
float64x2x2_t test_vld1q_f64_x2(float64_t const *a) {
  return vld1q_f64_x2(a);
}
+
+// CHECK-LABEL: define dso_local %struct.poly64x2x2_t @test_vld1q_p64_x2(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X2X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X2X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } [[VLD1XN]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X2X2_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [2 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY64X2X2_T]] [[TMP10]]
+//
// vld1q_p64_x2: multi-vector load of two <2 x i64> values via
// @llvm.aarch64.neon.ld1x2.v2i64.p0. The CHECK lines above verify MSan
// zeroes the result temporary's shadow before the loaded aggregate is
// stored, and writes clean shadow to __msan_retval_tls.
poly64x2x2_t test_vld1q_p64_x2(poly64_t const *a) {
  return vld1q_p64_x2(a);
}
+
+// CHECK-LABEL: define dso_local %struct.float64x1x2_t @test_vld1_f64_x2(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x double>, <1 x double> } [[VLD1XN]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X2_T]] [[TMP10]]
+//
// vld1_f64_x2: 64-bit variant — loads two <1 x double> values (16 bytes)
// via @llvm.aarch64.neon.ld1x2.v1f64.p0. The CHECK lines above verify MSan
// zeroes the 16-byte result shadow before the loaded aggregate is stored.
float64x1x2_t test_vld1_f64_x2(float64_t const *a) {
  return vld1_f64_x2(a);
}
+
+// CHECK-LABEL: define dso_local %struct.poly64x1x2_t @test_vld1_p64_x2(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X1X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X1X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } [[VLD1XN]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X1X2_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [2 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY64X1X2_T]] [[TMP10]]
+//
// vld1_p64_x2: 64-bit variant — loads two <1 x i64> values (16 bytes)
// via @llvm.aarch64.neon.ld1x2.v1i64.p0. The CHECK lines above verify MSan
// zeroes the 16-byte result shadow before the loaded aggregate is stored.
poly64x1x2_t test_vld1_p64_x2(poly64_t const *a) {
  return vld1_p64_x2(a);
}
+
+// CHECK-LABEL: define dso_local %struct.float64x2x3_t @test_vld1q_f64_x3(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x double>, <2 x double>, <2 x double> } [[VLD1XN]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X3_T]] [[TMP10]]
+//
// vld1q_f64_x3: multi-vector load of three <2 x double> values (48 bytes)
// via @llvm.aarch64.neon.ld1x3.v2f64.p0. The CHECK lines above verify MSan
// zeroes the 48-byte result shadow before the loaded aggregate is stored,
// and writes clean shadow to __msan_retval_tls.
float64x2x3_t test_vld1q_f64_x3(float64_t const *a) {
  return vld1q_f64_x3(a);
}
+
+// CHECK-LABEL: define dso_local %struct.poly64x2x3_t @test_vld1q_p64_x3(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X2X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X2X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } [[VLD1XN]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X2X3_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [3 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY64X2X3_T]] [[TMP10]]
+//
+poly64x2x3_t test_vld1q_p64_x3(poly64_t const *a) {
+  return vld1q_p64_x3(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float64x1x3_t @test_vld1_f64_x3(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x double>, <1 x double>, <1 x double> } [[VLD1XN]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X3_T]] [[TMP10]]
+//
+float64x1x3_t test_vld1_f64_x3(float64_t const *a) {
+  return vld1_f64_x3(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly64x1x3_t @test_vld1_p64_x3(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X1X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X1X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } [[VLD1XN]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X1X3_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [3 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY64X1X3_T]] [[TMP10]]
+//
+poly64x1x3_t test_vld1_p64_x3(poly64_t const *a) {
+  return vld1_p64_x3(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float64x2x4_t @test_vld1q_f64_x4(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x double>, <2 x double>, <2 x double>, <2 x double> } [[VLD1XN]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X4_T]] [[TMP10]]
+//
+float64x2x4_t test_vld1q_f64_x4(float64_t const *a) {
+  return vld1q_f64_x4(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly64x2x4_t @test_vld1q_p64_x4(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X2X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X2X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[VLD1XN]], ptr [[__RET]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X2X4_T]], ptr [[RETVAL]], align 16
+// CHECK-NEXT:    store { [4 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY64X2X4_T]] [[TMP10]]
+//
+poly64x2x4_t test_vld1q_p64_x4(poly64_t const *a) {
+  return vld1q_p64_x4(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.float64x1x4_t @test_vld1_f64_x4(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x double>, <1 x double>, <1 x double>, <1 x double> } [[VLD1XN]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X4_T]] [[TMP10]]
+//
+float64x1x4_t test_vld1_f64_x4(float64_t const *a) {
+  return vld1_f64_x4(a);
+}
+
+// CHECK-LABEL: define dso_local %struct.poly64x1x4_t @test_vld1_p64_x4(
+// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X1X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X1X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0(ptr [[A]])
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } [[VLD1XN]], ptr [[__RET]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
+// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X1X4_T]], ptr [[RETVAL]], align 8
+// CHECK-NEXT:    store { [4 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
+// CHECK-NEXT:    ret [[STRUCT_POLY64X1X4_T]] [[TMP10]]
+//
+poly64x1x4_t test_vld1_p64_x4(poly64_t const *a) {
+  return vld1_p64_x4(a);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_f64_x2(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x2.v2f64.p0(<2 x double> [[TMP14]], <2 x double> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_f64_x2(float64_t *a, float64x2x2_t b) {
+  vst1q_f64_x2(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_p64_x2(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X2X2_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X2X2_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [2 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> [[TMP14]], <2 x i64> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_p64_x2(poly64_t *a, poly64x2x2_t b) {
+  vst1q_p64_x2(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_f64_x2(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x2.v1f64.p0(<1 x double> [[TMP14]], <1 x double> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1_f64_x2(float64_t *a, float64x1x2_t b) {
+  vst1_f64_x2(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_p64_x2(
+// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X1X2_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X1X2_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X2_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [2 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [2 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X2_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> [[TMP14]], <1 x i64> [[TMP15]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1_p64_x2(poly64_t *a, poly64x1x2_t b) {
+  vst1_p64_x2(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_f64_x3(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x double> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x double>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x3.v2f64.p0(<2 x double> [[TMP16]], <2 x double> [[TMP17]], <2 x double> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_f64_x3(float64_t *a, float64x2x3_t b) {
+  vst1q_f64_x3(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_p64_x3(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X2X3_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X2X3_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [3 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> [[TMP16]], <2 x i64> [[TMP17]], <2 x i64> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_p64_x3(poly64_t *a, poly64x2x3_t b) {
+  vst1q_p64_x3(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_f64_x3(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x double> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x double>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x3.v1f64.p0(<1 x double> [[TMP16]], <1 x double> [[TMP17]], <1 x double> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1_f64_x3(float64_t *a, float64x1x3_t b) {
+  vst1_f64_x3(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_p64_x3(
+// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X1X3_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X1X3_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X3_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [3 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [3 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X3_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> [[TMP16]], <1 x i64> [[TMP17]], <1 x i64> [[TMP18]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1_p64_x3(poly64_t *a, poly64x1x3_t b) {
+  vst1_p64_x3(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_f64_x4(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x double> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x double>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x double> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x double>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <2 x double>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x4.v2f64.p0(<2 x double> [[TMP18]], <2 x double> [[TMP19]], <2 x double> [[TMP20]], <2 x double> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_f64_x4(float64_t *a, float64x2x4_t b) {
+  vst1q_f64_x4(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1q_p64_x4(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X2X4_T:%.*]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X2X4_T]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
+// CHECK-NEXT:    store [4 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x i64>, ptr [[ARRAYIDX6]], align 16
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x i64> [[TMP16]] to <16 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <2 x i64>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> [[TMP18]], <2 x i64> [[TMP19]], <2 x i64> [[TMP20]], <2 x i64> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1q_p64_x4(poly64_t *a, poly64x2x4_t b) {
+  vst1q_p64_x4(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_f64_x4(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x double> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <1 x double>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <1 x double> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x double>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <1 x double>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x4.v1f64.p0(<1 x double> [[TMP18]], <1 x double> [[TMP19]], <1 x double> [[TMP20]], <1 x double> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1_f64_x4(float64_t *a, float64x1x4_t b) {
+  vst1_f64_x4(a, b);
+}
+
+// CHECK-LABEL: define dso_local void @test_vst1_p64_x4(
+// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.donothing()
+// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X1X4_T:%.*]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
+// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X1X4_T]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
+// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
+// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X4_T]], ptr [[B]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
+// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+// CHECK-NEXT:    store [4 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
+// CHECK-NEXT:    store [4 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
+// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
+// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
+// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
+// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
+// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
+// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
+// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
+// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
+// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
+// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X4_T]], ptr [[__S1]], i32 0, i32 0
+// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL5]], i64 0, i64 3
+// CHECK-NEXT:    [[TMP16:%.*]] = load <1 x i64>, ptr [[ARRAYIDX6]], align 8
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <1 x i64> [[TMP16]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
+// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
+// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <1 x i64>
+// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> [[TMP18]], <1 x i64> [[TMP19]], <1 x i64> [[TMP20]], <1 x i64> [[TMP21]], ptr [[A]])
+// CHECK-NEXT:    ret void
+//
+void test_vst1_p64_x4(poly64_t *a, poly64x1x4_t b) {
+  vst1_p64_x4(a, b);
+}
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index c7d41f6298372..f3f628333dec8 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -181,6 +181,7 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsAArch64.h"
 #include "llvm/IR/IntrinsicsX86.h"
 #include "llvm/IR/MDBuilder.h"
 #include "llvm/IR/Module.h"
@@ -3865,6 +3866,141 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOriginForNaryOp(I);
   }
 
+  /// Given two vector shadows <A0, A1, ...> and <B0, B1, ...>, each with
+  /// \p Width elements, return the interleaved vector
+  /// <A0, B0, A1, B1, ...> (2 * Width elements).
+  Value *interleaveAB(IRBuilder<> &IRB, Value *Left, Value *Right,
+                      unsigned Width) {
+    assert(isa<FixedVectorType>(Left->getType()));
+    assert(isa<FixedVectorType>(Right->getType()));
+
+    // Shuffle mask [0, Width, 1, Width + 1, ...]: indices < Width select
+    // from Left, indices >= Width select from Right.
+    SmallVector<int, 32> Mask;
+    for (unsigned I = 0; I < Width; I++) {
+      Mask.push_back(I);
+      Mask.push_back(I + Width);
+    }
+
+    return IRB.CreateShuffleVector(Left, Right, Mask);
+  }
+
+  /// Given three shadows, which are already interleaved into two vectors
+  /// ABABABAB... and CxCxCxCx... (x is undef), return the fully interleaved
+  /// value ABCABCABC...
+  ///
+  /// \p Width is the element count of a single shadow (e.g., A); both
+  /// \p Left (AB) and \p Right (Cx) therefore have Width * 2 elements.
+  Value *interleaveABCx(IRBuilder<> &IRB, Value *Left, Value *Right,
+                        unsigned Width) {
+    assert(isa<FixedVectorType>(Left->getType()));
+    assert(isa<FixedVectorType>(Right->getType()));
+
+    SmallVector<int, 48> Mask;
+    for (unsigned I = 0; I < Width * 2; I += 2) {
+      Mask.push_back(I);     // A_i, from Left
+      Mask.push_back(I + 1); // B_i, from Left
+      // C_i lives at position I of Right; since Left has Width * 2 elements,
+      // its shuffle index is offset by Width * 2 (not Width, which would
+      // still address Left).
+      Mask.push_back(I + Width * 2);
+      // Position (I + 1) of Right contains undef; don't copy it.
+    }
+
+    return IRB.CreateShuffleVector(Left, Right, Mask);
+  }
+
+  /// Interleave the shadows (shadowMode == true) or origins of the vector
+  /// arguments of \p I, an Arm NEON vector store intrinsic whose last
+  /// operand is the destination pointer and whose preceding operands are
+  /// the vectors to be stored.
+  Value *interleaveShadowOrOrigin(IRBuilder<> &IRB, IntrinsicInst &I,
+                                  bool shadowMode) {
+    // Exclude the callee operand of the call.
+    int numArgOperands = I.getNumOperands() - 1;
+    assert(numArgOperands >= 1);
+
+    // All arguments except the last (the destination pointer) are vectors.
+    int numVectors = numArgOperands - 1;
+    for (int i = 0; i < numVectors; i++)
+      assert(isa<FixedVectorType>(I.getArgOperand(i)->getType()));
+    assert(isa<PointerType>(I.getArgOperand(numArgOperands - 1)->getType()));
+
+    unsigned Width =
+        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
+    if (!shadowMode)
+      Width = Width / 4; // One origin value per 32 bits of app memory.
+
+    switch (numVectors) {
+    case 1:
+      // Nothing to interleave.
+      return getShadow(&I, 0);
+    case 2:
+      return interleaveAB(IRB, getShadow(&I, 0), getShadow(&I, 1), Width);
+    case 3: {
+      // Interleave pairwise ({A,B} and {C,undef}), then merge the results.
+      Value *UndefV = UndefValue::get(getShadow(&I, 0)->getType());
+      Value *AB = interleaveAB(IRB, getShadow(&I, 0), getShadow(&I, 1), Width);
+      Value *Cx = interleaveAB(IRB, getShadow(&I, 2), UndefV, Width);
+      return interleaveABCx(IRB, AB, Cx, Width);
+    }
+    case 4: {
+      Value *AB = interleaveAB(IRB, getShadow(&I, 0), getShadow(&I, 1), Width);
+      Value *CD = interleaveAB(IRB, getShadow(&I, 2), getShadow(&I, 3), Width);
+      // AB and CD are each twice the width of a single shadow.
+      return interleaveAB(IRB, AB, CD, Width * 2);
+    }
+    default:
+      llvm_unreachable("Unexpected number of vectors");
+    }
+  }
+
+  /// Handle Arm NEON vector store intrinsics (vst{1,2,3,4}).
+  ///
+  /// Arm NEON vector store intrinsics have the output address (pointer) as
+  /// the last argument, with the initial arguments being the vectors to
+  /// store. They return void.
+  void handleNEONVectorStoreIntrinsic(IntrinsicInst &I) {
+    IRBuilder<> IRB(&I);
+
+    // Exclude the callee operand of the call.
+    int numArgOperands = I.getNumOperands() - 1;
+    assert(numArgOperands >= 1);
+    Value *Addr = I.getArgOperand(numArgOperands - 1);
+
+    if (ClCheckAccessAddress)
+      insertShadowCheck(Addr, &I);
+
+    // The shadow of an interleaved store is the interleaving of the
+    // per-vector shadows, stored at the shadow of the destination.
+    Value *interleavedShadow = interleaveShadowOrOrigin(IRB, I, true);
+
+    Value *ShadowPtr, *OriginPtr;
+    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
+        Addr, IRB, interleavedShadow->getType(), Align(1), /*isStore*/ true);
+    IRB.CreateAlignedStore(interleavedShadow, ShadowPtr, Align(1));
+
+    if (MS.TrackOrigins)
+      // TODO: propagate the origins of the stored vectors (e.g., via
+      // interleaveShadowOrOrigin with shadowMode == false) instead of
+      // resetting to a clean origin.
+      setOrigin(&I, getCleanOrigin());
+  }
+
   void visitIntrinsicInst(IntrinsicInst &I) {
     switch (I.getIntrinsicID()) {
     case Intrinsic::uadd_with_overflow:
@@ -4204,6 +4340,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       setOrigin(&I, getCleanOrigin());
       break;
 
+    case Intrinsic::aarch64_neon_st2:
+    case Intrinsic::aarch64_neon_st3:
+    case Intrinsic::aarch64_neon_st4: {
+      handleNEONVectorStoreIntrinsic(I);
+      break;
+    }
+
     default:
       if (!handleUnknownIntrinsic(I))
         visitInstruction(I);

>From 54ecd5e2631705db943e4eab6a006c6172cc07c4 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Tue, 16 Jul 2024 19:44:16 +0000
Subject: [PATCH 02/14] Update code and test

---
 .../CodeGen/aarch64-neon-intrinsics-msan.c    | 7595 -----------------
 .../Instrumentation/MemorySanitizer.cpp       |   44 +-
 .../MemorySanitizer/AArch64/neon_vst.ll       | 2166 +++--
 3 files changed, 1540 insertions(+), 8265 deletions(-)
 delete mode 100644 clang/test/CodeGen/aarch64-neon-intrinsics-msan.c

diff --git a/clang/test/CodeGen/aarch64-neon-intrinsics-msan.c b/clang/test/CodeGen/aarch64-neon-intrinsics-msan.c
deleted file mode 100644
index 4d2c9975dd1cf..0000000000000
--- a/clang/test/CodeGen/aarch64-neon-intrinsics-msan.c
+++ /dev/null
@@ -1,7595 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
-// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
-// RUN:     -S -disable-O0-optnone \
-// RUN:  -flax-vector-conversions=none -emit-llvm -o - %s \
-// RUN: | opt -S -passes=mem2reg \
-// RUN: | opt -S -passes=msan \
-// RUN: | FileCheck %s
-
-// REQUIRES: aarch64-registered-target || arm-registered-target
-
-// Forked from aarch64-neon-intrinsics.c
-
-#include <arm_neon.h>
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vld1q_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A]], align 1
-// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
-//
-uint8x16_t test_vld1q_u8(uint8_t const *a) {
-  return vld1q_u8(a);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vld1q_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr [[A]], align 2
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
-//
-uint16x8_t test_vld1q_u16(uint16_t const *a) {
-  return vld1q_u16(a);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vld1q_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A]], align 4
-// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
-//
-uint32x4_t test_vld1q_u32(uint32_t const *a) {
-  return vld1q_u32(a);
-}
-
-// CHECK-LABEL: define dso_local <2 x i64> @test_vld1q_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i64>, ptr [[A]], align 8
-// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <2 x i64> [[TMP0]]
-//
-uint64x2_t test_vld1q_u64(uint64_t const *a) {
-  return vld1q_u64(a);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vld1q_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A]], align 1
-// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
-//
-int8x16_t test_vld1q_s8(int8_t const *a) {
-  return vld1q_s8(a);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vld1q_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr [[A]], align 2
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
-//
-int16x8_t test_vld1q_s16(int16_t const *a) {
-  return vld1q_s16(a);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vld1q_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A]], align 4
-// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
-//
-int32x4_t test_vld1q_s32(int32_t const *a) {
-  return vld1q_s32(a);
-}
-
-// CHECK-LABEL: define dso_local <2 x i64> @test_vld1q_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i64>, ptr [[A]], align 8
-// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <2 x i64> [[TMP0]]
-//
-int64x2_t test_vld1q_s64(int64_t const *a) {
-  return vld1q_s64(a);
-}
-
-// CHECK-LABEL: define dso_local <8 x half> @test_vld1q_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x half>, ptr [[A]], align 2
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <8 x half> [[TMP0]]
-//
-float16x8_t test_vld1q_f16(float16_t const *a) {
-  return vld1q_f16(a);
-}
-
-// CHECK-LABEL: define dso_local <4 x float> @test_vld1q_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[A]], align 4
-// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <4 x float> [[TMP0]]
-//
-float32x4_t test_vld1q_f32(float32_t const *a) {
-  return vld1q_f32(a);
-}
-
-// CHECK-LABEL: define dso_local <2 x double> @test_vld1q_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[A]], align 8
-// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <2 x double> [[TMP0]]
-//
-float64x2_t test_vld1q_f64(float64_t const *a) {
-  return vld1q_f64(a);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vld1q_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i8>, ptr [[A]], align 1
-// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <16 x i8> [[TMP0]]
-//
-poly8x16_t test_vld1q_p8(poly8_t const *a) {
-  return vld1q_p8(a);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vld1q_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i16>, ptr [[A]], align 2
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
-//
-poly16x8_t test_vld1q_p16(poly16_t const *a) {
-  return vld1q_p16(a);
-}
-
-// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
-// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
-//
-uint8x8_t test_vld1_u8(uint8_t const *a) {
-  return vld1_u8(a);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 2
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
-//
-uint16x4_t test_vld1_u16(uint16_t const *a) {
-  return vld1_u16(a);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vld1_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[A]], align 4
-// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <2 x i32> [[TMP0]]
-//
-uint32x2_t test_vld1_u32(uint32_t const *a) {
-  return vld1_u32(a);
-}
-
-// CHECK-LABEL: define dso_local <1 x i64> @test_vld1_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x i64>, ptr [[A]], align 8
-// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <1 x i64> [[TMP0]]
-//
-uint64x1_t test_vld1_u64(uint64_t const *a) {
-  return vld1_u64(a);
-}
-
-// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
-// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
-//
-int8x8_t test_vld1_s8(int8_t const *a) {
-  return vld1_s8(a);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 2
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
-//
-int16x4_t test_vld1_s16(int16_t const *a) {
-  return vld1_s16(a);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vld1_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[A]], align 4
-// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <2 x i32> [[TMP0]]
-//
-int32x2_t test_vld1_s32(int32_t const *a) {
-  return vld1_s32(a);
-}
-
-// CHECK-LABEL: define dso_local <1 x i64> @test_vld1_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x i64>, ptr [[A]], align 8
-// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <1 x i64> [[TMP0]]
-//
-int64x1_t test_vld1_s64(int64_t const *a) {
-  return vld1_s64(a);
-}
-
-// CHECK-LABEL: define dso_local <4 x half> @test_vld1_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x half>, ptr [[A]], align 2
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <4 x half> [[TMP0]]
-//
-float16x4_t test_vld1_f16(float16_t const *a) {
-  return vld1_f16(a);
-}
-
-// CHECK-LABEL: define dso_local <2 x float> @test_vld1_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[A]], align 4
-// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <2 x float> [[TMP0]]
-//
-float32x2_t test_vld1_f32(float32_t const *a) {
-  return vld1_f32(a);
-}
-
-// CHECK-LABEL: define dso_local <1 x double> @test_vld1_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x double>, ptr [[A]], align 8
-// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <1 x double> [[TMP0]]
-//
-float64x1_t test_vld1_f64(float64_t const *a) {
-  return vld1_f64(a);
-}
-
-// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
-// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
-//
-poly8x8_t test_vld1_p8(poly8_t const *a) {
-  return vld1_p8(a);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 2
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
-//
-poly16x4_t test_vld1_p16(poly16_t const *a) {
-  return vld1_p16(a);
-}
-
-// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_u8_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
-// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
-//
-uint8x8_t test_vld1_u8_void(void *a) {
-  return vld1_u8(a);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_u16_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 1
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
-//
-uint16x4_t test_vld1_u16_void(void *a) {
-  return vld1_u16(a);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vld1_u32_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[A]], align 1
-// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <2 x i32> [[TMP0]]
-//
-uint32x2_t test_vld1_u32_void(void *a) {
-  return vld1_u32(a);
-}
-
-// CHECK-LABEL: define dso_local <1 x i64> @test_vld1_u64_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x i64>, ptr [[A]], align 1
-// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <1 x i64> [[TMP0]]
-//
-uint64x1_t test_vld1_u64_void(void *a) {
-  return vld1_u64(a);
-}
-
-// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_s8_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
-// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
-//
-int8x8_t test_vld1_s8_void(void *a) {
-  return vld1_s8(a);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_s16_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 1
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
-//
-int16x4_t test_vld1_s16_void(void *a) {
-  return vld1_s16(a);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vld1_s32_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[A]], align 1
-// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <2 x i32> [[TMP0]]
-//
-int32x2_t test_vld1_s32_void(void *a) {
-  return vld1_s32(a);
-}
-
-// CHECK-LABEL: define dso_local <1 x i64> @test_vld1_s64_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x i64>, ptr [[A]], align 1
-// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <1 x i64> [[TMP0]]
-//
-int64x1_t test_vld1_s64_void(void *a) {
-  return vld1_s64(a);
-}
-
-// CHECK-LABEL: define dso_local <4 x half> @test_vld1_f16_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x half>, ptr [[A]], align 1
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <4 x half> [[TMP0]]
-//
-float16x4_t test_vld1_f16_void(void *a) {
-  return vld1_f16(a);
-}
-
-// CHECK-LABEL: define dso_local <2 x float> @test_vld1_f32_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[A]], align 1
-// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <2 x float> [[TMP0]]
-//
-float32x2_t test_vld1_f32_void(void *a) {
-  return vld1_f32(a);
-}
-
-// CHECK-LABEL: define dso_local <1 x double> @test_vld1_f64_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <1 x double>, ptr [[A]], align 1
-// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <1 x double> [[TMP0]]
-//
-float64x1_t test_vld1_f64_void(void *a) {
-  return vld1_f64(a);
-}
-
-// CHECK-LABEL: define dso_local <8 x i8> @test_vld1_p8_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr [[A]], align 1
-// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <8 x i8> [[TMP0]]
-//
-poly8x8_t test_vld1_p8_void(void *a) {
-  return vld1_p8(a);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vld1_p16_void(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i16>, ptr [[A]], align 1
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret <4 x i16> [[TMP0]]
-//
-poly16x4_t test_vld1_p16_void(void *a) {
-  return vld1_p16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint8x16x2_t @test_vld2q_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X16X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X16X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X16X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT8X16X2_T]] [[TMP10]]
-//
-uint8x16x2_t test_vld2q_u8(uint8_t const *a) {
-  return vld2q_u8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint16x8x2_t @test_vld2q_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X8X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X8X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X8X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT16X8X2_T]] [[TMP10]]
-//
-uint16x8x2_t test_vld2q_u16(uint16_t const *a) {
-  return vld2q_u16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint32x4x2_t @test_vld2q_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X4X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X4X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X4X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT32X4X2_T]] [[TMP10]]
-//
-uint32x4x2_t test_vld2q_u32(uint32_t const *a) {
-  return vld2q_u32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint64x2x2_t @test_vld2q_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X2X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X2X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X2X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT64X2X2_T]] [[TMP10]]
-//
-uint64x2x2_t test_vld2q_u64(uint64_t const *a) {
-  return vld2q_u64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int8x16x2_t @test_vld2q_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X16X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X16X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X16X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT8X16X2_T]] [[TMP10]]
-//
-int8x16x2_t test_vld2q_s8(int8_t const *a) {
-  return vld2q_s8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int16x8x2_t @test_vld2q_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X8X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X8X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X8X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT16X8X2_T]] [[TMP10]]
-//
-int16x8x2_t test_vld2q_s16(int16_t const *a) {
-  return vld2q_s16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int32x4x2_t @test_vld2q_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X4X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X4X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X4X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT32X4X2_T]] [[TMP10]]
-//
-int32x4x2_t test_vld2q_s32(int32_t const *a) {
-  return vld2q_s32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int64x2x2_t @test_vld2q_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X2X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X2X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X2X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT64X2X2_T]] [[TMP10]]
-//
-int64x2x2_t test_vld2q_s64(int64_t const *a) {
-  return vld2q_s64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float16x8x2_t @test_vld2q_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x half>, <8 x half> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X8X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT16X8X2_T]] [[TMP10]]
-//
-float16x8x2_t test_vld2q_f16(float16_t const *a) {
-  return vld2q_f16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float32x4x2_t @test_vld2q_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X4X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X4X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <4 x float>, <4 x float> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X4X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT32X4X2_T]] [[TMP10]]
-//
-float32x4x2_t test_vld2q_f32(float32_t const *a) {
-  return vld2q_f32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x2x2_t @test_vld2q_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x double>, <2 x double> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X2_T]] [[TMP10]]
-//
-float64x2x2_t test_vld2q_f64(float64_t const *a) {
-  return vld2q_f64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly8x16x2_t @test_vld2q_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X16X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X16X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X16X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY8X16X2_T]] [[TMP10]]
-//
-poly8x16x2_t test_vld2q_p8(poly8_t const *a) {
-  return vld2q_p8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly16x8x2_t @test_vld2q_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X8X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X8X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16> } [[VLD2]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X8X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY16X8X2_T]] [[TMP10]]
-//
-poly16x8x2_t test_vld2q_p16(poly16_t const *a) {
-  return vld2q_p16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint8x8x2_t @test_vld2_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X8X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X8X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X8X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT8X8X2_T]] [[TMP10]]
-//
-uint8x8x2_t test_vld2_u8(uint8_t const *a) {
-  return vld2_u8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint16x4x2_t @test_vld2_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X4X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X4X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X4X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT16X4X2_T]] [[TMP10]]
-//
-uint16x4x2_t test_vld2_u16(uint16_t const *a) {
-  return vld2_u16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint32x2x2_t @test_vld2_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X2X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X2X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X2X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT32X2X2_T]] [[TMP10]]
-//
-uint32x2x2_t test_vld2_u32(uint32_t const *a) {
-  return vld2_u32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint64x1x2_t @test_vld2_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X1X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X1X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X1X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT64X1X2_T]] [[TMP10]]
-//
-uint64x1x2_t test_vld2_u64(uint64_t const *a) {
-  return vld2_u64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int8x8x2_t @test_vld2_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X8X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X8X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X8X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT8X8X2_T]] [[TMP10]]
-//
-int8x8x2_t test_vld2_s8(int8_t const *a) {
-  return vld2_s8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int16x4x2_t @test_vld2_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X4X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X4X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X4X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT16X4X2_T]] [[TMP10]]
-//
-int16x4x2_t test_vld2_s16(int16_t const *a) {
-  return vld2_s16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int32x2x2_t @test_vld2_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X2X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X2X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X2X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT32X2X2_T]] [[TMP10]]
-//
-int32x2x2_t test_vld2_s32(int32_t const *a) {
-  return vld2_s32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int64x1x2_t @test_vld2_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X1X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X1X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X1X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT64X1X2_T]] [[TMP10]]
-//
-int64x1x2_t test_vld2_s64(int64_t const *a) {
-  return vld2_s64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float16x4x2_t @test_vld2_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x half>, <4 x half> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X4X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT16X4X2_T]] [[TMP10]]
-//
-float16x4x2_t test_vld2_f16(float16_t const *a) {
-  return vld2_f16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float32x2x2_t @test_vld2_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X2X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X2X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <2 x float>, <2 x float> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X2X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT32X2X2_T]] [[TMP10]]
-//
-float32x2x2_t test_vld2_f32(float32_t const *a) {
-  return vld2_f32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x1x2_t @test_vld2_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x double>, <1 x double> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X2_T]] [[TMP10]]
-//
-float64x1x2_t test_vld2_f64(float64_t const *a) {
-  return vld2_f64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly8x8x2_t @test_vld2_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X8X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X8X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X8X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY8X8X2_T]] [[TMP10]]
-//
-poly8x8x2_t test_vld2_p8(poly8_t const *a) {
-  return vld2_p8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly16x4x2_t @test_vld2_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X4X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X4X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD2:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16> } [[VLD2]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X4X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY16X4X2_T]] [[TMP10]]
-//
-poly16x4x2_t test_vld2_p16(poly16_t const *a) {
-  return vld2_p16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint8x16x3_t @test_vld3q_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X16X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X16X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X16X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT8X16X3_T]] [[TMP10]]
-//
-uint8x16x3_t test_vld3q_u8(uint8_t const *a) {
-  return vld3q_u8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint16x8x3_t @test_vld3q_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X8X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X8X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X8X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT16X8X3_T]] [[TMP10]]
-//
-uint16x8x3_t test_vld3q_u16(uint16_t const *a) {
-  return vld3q_u16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint32x4x3_t @test_vld3q_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X4X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X4X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X4X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT32X4X3_T]] [[TMP10]]
-//
-uint32x4x3_t test_vld3q_u32(uint32_t const *a) {
-  return vld3q_u32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint64x2x3_t @test_vld3q_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X2X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X2X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X2X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT64X2X3_T]] [[TMP10]]
-//
-uint64x2x3_t test_vld3q_u64(uint64_t const *a) {
-  return vld3q_u64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int8x16x3_t @test_vld3q_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X16X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X16X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X16X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT8X16X3_T]] [[TMP10]]
-//
-int8x16x3_t test_vld3q_s8(int8_t const *a) {
-  return vld3q_s8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int16x8x3_t @test_vld3q_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X8X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X8X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X8X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT16X8X3_T]] [[TMP10]]
-//
-int16x8x3_t test_vld3q_s16(int16_t const *a) {
-  return vld3q_s16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int32x4x3_t @test_vld3q_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X4X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X4X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X4X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT32X4X3_T]] [[TMP10]]
-//
-int32x4x3_t test_vld3q_s32(int32_t const *a) {
-  return vld3q_s32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int64x2x3_t @test_vld3q_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X2X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X2X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X2X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT64X2X3_T]] [[TMP10]]
-//
-int64x2x3_t test_vld3q_s64(int64_t const *a) {
-  return vld3q_s64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float16x8x3_t @test_vld3q_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X8X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X8X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x half>, <8 x half>, <8 x half> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X8X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT16X8X3_T]] [[TMP10]]
-//
-float16x8x3_t test_vld3q_f16(float16_t const *a) {
-  return vld3q_f16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float32x4x3_t @test_vld3q_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X4X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X4X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <4 x float>, <4 x float>, <4 x float> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X4X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT32X4X3_T]] [[TMP10]]
-//
-float32x4x3_t test_vld3q_f32(float32_t const *a) {
-  return vld3q_f32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x2x3_t @test_vld3q_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x double>, <2 x double>, <2 x double> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X3_T]] [[TMP10]]
-//
-float64x2x3_t test_vld3q_f64(float64_t const *a) {
-  return vld3q_f64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly8x16x3_t @test_vld3q_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X16X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X16X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X16X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY8X16X3_T]] [[TMP10]]
-//
-poly8x16x3_t test_vld3q_p8(poly8_t const *a) {
-  return vld3q_p8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly16x8x3_t @test_vld3q_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X8X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X8X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16> } [[VLD3]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X8X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY16X8X3_T]] [[TMP10]]
-//
-poly16x8x3_t test_vld3q_p16(poly16_t const *a) {
-  return vld3q_p16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint8x8x3_t @test_vld3_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X8X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X8X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X8X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT8X8X3_T]] [[TMP10]]
-//
-uint8x8x3_t test_vld3_u8(uint8_t const *a) {
-  return vld3_u8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint16x4x3_t @test_vld3_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X4X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X4X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X4X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT16X4X3_T]] [[TMP10]]
-//
-uint16x4x3_t test_vld3_u16(uint16_t const *a) {
-  return vld3_u16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint32x2x3_t @test_vld3_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X2X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X2X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X2X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT32X2X3_T]] [[TMP10]]
-//
-uint32x2x3_t test_vld3_u32(uint32_t const *a) {
-  return vld3_u32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint64x1x3_t @test_vld3_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X1X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X1X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X1X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT64X1X3_T]] [[TMP10]]
-//
-uint64x1x3_t test_vld3_u64(uint64_t const *a) {
-  return vld3_u64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int8x8x3_t @test_vld3_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X8X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X8X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X8X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT8X8X3_T]] [[TMP10]]
-//
-int8x8x3_t test_vld3_s8(int8_t const *a) {
-  return vld3_s8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int16x4x3_t @test_vld3_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X4X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X4X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X4X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT16X4X3_T]] [[TMP10]]
-//
-int16x4x3_t test_vld3_s16(int16_t const *a) {
-  return vld3_s16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int32x2x3_t @test_vld3_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X2X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X2X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X2X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT32X2X3_T]] [[TMP10]]
-//
-int32x2x3_t test_vld3_s32(int32_t const *a) {
-  return vld3_s32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int64x1x3_t @test_vld3_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X1X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X1X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X1X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT64X1X3_T]] [[TMP10]]
-//
-int64x1x3_t test_vld3_s64(int64_t const *a) {
-  return vld3_s64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float16x4x3_t @test_vld3_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X4X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X4X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x half>, <4 x half>, <4 x half> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X4X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT16X4X3_T]] [[TMP10]]
-//
-float16x4x3_t test_vld3_f16(float16_t const *a) {
-  return vld3_f16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float32x2x3_t @test_vld3_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X2X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X2X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <2 x float>, <2 x float>, <2 x float> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X2X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT32X2X3_T]] [[TMP10]]
-//
-float32x2x3_t test_vld3_f32(float32_t const *a) {
-  return vld3_f32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x1x3_t @test_vld3_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x double>, <1 x double>, <1 x double> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X3_T]] [[TMP10]]
-//
-float64x1x3_t test_vld3_f64(float64_t const *a) {
-  return vld3_f64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly8x8x3_t @test_vld3_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X8X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X8X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X8X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY8X8X3_T]] [[TMP10]]
-//
-poly8x8x3_t test_vld3_p8(poly8_t const *a) {
-  return vld3_p8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly16x4x3_t @test_vld3_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X4X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X4X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD3:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16> } [[VLD3]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X4X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY16X4X3_T]] [[TMP10]]
-//
-poly16x4x3_t test_vld3_p16(poly16_t const *a) {
-  return vld3_p16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint8x16x4_t @test_vld4q_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X16X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X16X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X16X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT8X16X4_T]] [[TMP10]]
-//
-uint8x16x4_t test_vld4q_u8(uint8_t const *a) {
-  return vld4q_u8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint16x8x4_t @test_vld4q_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X8X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X8X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X8X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT16X8X4_T]] [[TMP10]]
-//
-uint16x8x4_t test_vld4q_u16(uint16_t const *a) {
-  return vld4q_u16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint32x4x4_t @test_vld4q_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X4X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X4X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X4X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT32X4X4_T]] [[TMP10]]
-//
-uint32x4x4_t test_vld4q_u32(uint32_t const *a) {
-  return vld4q_u32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint64x2x4_t @test_vld4q_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X2X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X2X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X2X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT64X2X4_T]] [[TMP10]]
-//
-uint64x2x4_t test_vld4q_u64(uint64_t const *a) {
-  return vld4q_u64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int8x16x4_t @test_vld4q_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X16X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X16X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X16X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT8X16X4_T]] [[TMP10]]
-//
-int8x16x4_t test_vld4q_s8(int8_t const *a) {
-  return vld4q_s8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int16x8x4_t @test_vld4q_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X8X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X8X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X8X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT16X8X4_T]] [[TMP10]]
-//
-int16x8x4_t test_vld4q_s16(int16_t const *a) {
-  return vld4q_s16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int32x4x4_t @test_vld4q_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X4X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X4X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X4X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT32X4X4_T]] [[TMP10]]
-//
-int32x4x4_t test_vld4q_s32(int32_t const *a) {
-  return vld4q_s32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int64x2x4_t @test_vld4q_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X2X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X2X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X2X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT64X2X4_T]] [[TMP10]]
-//
-int64x2x4_t test_vld4q_s64(int64_t const *a) {
-  return vld4q_s64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float16x8x4_t @test_vld4q_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X8X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X8X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x half>, <8 x half>, <8 x half>, <8 x half> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X8X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT16X8X4_T]] [[TMP10]]
-//
-float16x8x4_t test_vld4q_f16(float16_t const *a) {
-  return vld4q_f16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float32x4x4_t @test_vld4q_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X4X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X4X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <4 x float>, <4 x float>, <4 x float>, <4 x float> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X4X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <4 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT32X4X4_T]] [[TMP10]]
-//
-float32x4x4_t test_vld4q_f32(float32_t const *a) {
-  return vld4q_f32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x2x4_t @test_vld4q_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x double>, <2 x double>, <2 x double>, <2 x double> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X4_T]] [[TMP10]]
-//
-float64x2x4_t test_vld4q_f64(float64_t const *a) {
-  return vld4q_f64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly8x16x4_t @test_vld4q_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X16X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X16X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X16X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <16 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY8X16X4_T]] [[TMP10]]
-//
-poly8x16x4_t test_vld4q_p8(poly8_t const *a) {
-  return vld4q_p8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly16x8x4_t @test_vld4q_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X8X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X8X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } [[VLD4]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X8X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <8 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY16X8X4_T]] [[TMP10]]
-//
-poly16x8x4_t test_vld4q_p16(poly16_t const *a) {
-  return vld4q_p16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint8x8x4_t @test_vld4_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT8X8X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT8X8X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT8X8X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT8X8X4_T]] [[TMP10]]
-//
-uint8x8x4_t test_vld4_u8(uint8_t const *a) {
-  return vld4_u8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint16x4x4_t @test_vld4_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT16X4X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT16X4X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT16X4X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT16X4X4_T]] [[TMP10]]
-//
-uint16x4x4_t test_vld4_u16(uint16_t const *a) {
-  return vld4_u16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint32x2x4_t @test_vld4_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT32X2X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT32X2X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT32X2X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT32X2X4_T]] [[TMP10]]
-//
-uint32x2x4_t test_vld4_u32(uint32_t const *a) {
-  return vld4_u32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.uint64x1x4_t @test_vld4_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_UINT64X1X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_UINT64X1X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_UINT64X1X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_UINT64X1X4_T]] [[TMP10]]
-//
-uint64x1x4_t test_vld4_u64(uint64_t const *a) {
-  return vld4_u64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int8x8x4_t @test_vld4_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT8X8X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT8X8X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT8X8X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT8X8X4_T]] [[TMP10]]
-//
-int8x8x4_t test_vld4_s8(int8_t const *a) {
-  return vld4_s8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int16x4x4_t @test_vld4_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT16X4X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT16X4X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT16X4X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT16X4X4_T]] [[TMP10]]
-//
-int16x4x4_t test_vld4_s16(int16_t const *a) {
-  return vld4_s16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int32x2x4_t @test_vld4_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT32X2X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT32X2X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT32X2X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT32X2X4_T]] [[TMP10]]
-//
-int32x2x4_t test_vld4_s32(int32_t const *a) {
-  return vld4_s32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.int64x1x4_t @test_vld4_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_INT64X1X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_INT64X1X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_INT64X1X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_INT64X1X4_T]] [[TMP10]]
-//
-int64x1x4_t test_vld4_s64(int64_t const *a) {
-  return vld4_s64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float16x4x4_t @test_vld4_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT16X4X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT16X4X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x half>, <4 x half>, <4 x half>, <4 x half> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT16X4X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT16X4X4_T]] [[TMP10]]
-//
-float16x4x4_t test_vld4_f16(float16_t const *a) {
-  return vld4_f16(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float32x2x4_t @test_vld4_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT32X2X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT32X2X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <2 x float>, <2 x float>, <2 x float>, <2 x float> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT32X2X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <2 x i32>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT32X2X4_T]] [[TMP10]]
-//
-float32x2x4_t test_vld4_f32(float32_t const *a) {
-  return vld4_f32(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x1x4_t @test_vld4_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x double>, <1 x double>, <1 x double>, <1 x double> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X4_T]] [[TMP10]]
-//
-float64x1x4_t test_vld4_f64(float64_t const *a) {
-  return vld4_f64(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly8x8x4_t @test_vld4_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY8X8X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY8X8X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY8X8X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <8 x i8>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY8X8X4_T]] [[TMP10]]
-//
-poly8x8x4_t test_vld4_p8(poly8_t const *a) {
-  return vld4_p8(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly16x4x4_t @test_vld4_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY16X4X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY16X4X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD4:%.*]] = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } [[VLD4]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY16X4X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <4 x i16>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY16X4X4_T]] [[TMP10]]
-//
-poly16x4x4_t test_vld4_p16(poly16_t const *a) {
-  return vld4_p16(a);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP2]], align 1
-// CHECK-NEXT:    store <16 x i8> [[B]], ptr [[A]], align 1
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_u8(uint8_t *a, uint8x16_t b) {
-  vst1q_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP4]], align 2
-// CHECK-NEXT:    store <8 x i16> [[TMP1]], ptr [[A]], align 2
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_u16(uint16_t *a, uint16x8_t b) {
-  vst1q_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP4]], align 4
-// CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr [[A]], align 4
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_u32(uint32_t *a, uint32x4_t b) {
-  vst1q_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]], <2 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP4]], align 8
-// CHECK-NEXT:    store <2 x i64> [[TMP1]], ptr [[A]], align 8
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_u64(uint64_t *a, uint64x2_t b) {
-  vst1q_u64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP2]], align 1
-// CHECK-NEXT:    store <16 x i8> [[B]], ptr [[A]], align 1
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_s8(int8_t *a, int8x16_t b) {
-  vst1q_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP4]], align 2
-// CHECK-NEXT:    store <8 x i16> [[TMP1]], ptr [[A]], align 2
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_s16(int16_t *a, int16x8_t b) {
-  vst1q_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP4]], align 4
-// CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr [[A]], align 4
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_s32(int32_t *a, int32x4_t b) {
-  vst1q_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]], <2 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP4]], align 8
-// CHECK-NEXT:    store <2 x i64> [[TMP1]], ptr [[A]], align 8
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_s64(int64_t *a, int64x2_t b) {
-  vst1q_s64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]], <8 x half> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x half>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP4]], align 2
-// CHECK-NEXT:    store <8 x half> [[TMP1]], ptr [[A]], align 2
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_f16(float16_t *a, float16x8_t b) {
-  vst1q_f16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]], <4 x float> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP4]], align 4
-// CHECK-NEXT:    store <4 x float> [[TMP1]], ptr [[A]], align 4
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_f32(float32_t *a, float32x4_t b) {
-  vst1q_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]], <2 x double> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP4]], align 8
-// CHECK-NEXT:    store <2 x double> [[TMP1]], ptr [[A]], align 8
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_f64(float64_t *a, float64x2_t b) {
-  vst1q_f64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP2]], align 1
-// CHECK-NEXT:    store <16 x i8> [[B]], ptr [[A]], align 1
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_p8(poly8_t *a, poly8x16_t b) {
-  vst1q_p8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP4]], align 2
-// CHECK-NEXT:    store <8 x i16> [[TMP1]], ptr [[A]], align 2
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_p16(poly16_t *a, poly16x8_t b) {
-  vst1q_p16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr [[TMP2]], align 1
-// CHECK-NEXT:    store <8 x i8> [[B]], ptr [[A]], align 1
-// CHECK-NEXT:    ret void
-//
-void test_vst1_u8(uint8_t *a, uint8x8_t b) {
-  vst1_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr [[TMP4]], align 2
-// CHECK-NEXT:    store <4 x i16> [[TMP1]], ptr [[A]], align 2
-// CHECK-NEXT:    ret void
-//
-void test_vst1_u16(uint16_t *a, uint16x4_t b) {
-  vst1_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP4]], align 4
-// CHECK-NEXT:    store <2 x i32> [[TMP1]], ptr [[A]], align 4
-// CHECK-NEXT:    ret void
-//
-void test_vst1_u32(uint32_t *a, uint32x2_t b) {
-  vst1_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]], <1 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr [[TMP4]], align 8
-// CHECK-NEXT:    store <1 x i64> [[TMP1]], ptr [[A]], align 8
-// CHECK-NEXT:    ret void
-//
-void test_vst1_u64(uint64_t *a, uint64x1_t b) {
-  vst1_u64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr [[TMP2]], align 1
-// CHECK-NEXT:    store <8 x i8> [[B]], ptr [[A]], align 1
-// CHECK-NEXT:    ret void
-//
-void test_vst1_s8(int8_t *a, int8x8_t b) {
-  vst1_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr [[TMP4]], align 2
-// CHECK-NEXT:    store <4 x i16> [[TMP1]], ptr [[A]], align 2
-// CHECK-NEXT:    ret void
-//
-void test_vst1_s16(int16_t *a, int16x4_t b) {
-  vst1_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP4]], align 4
-// CHECK-NEXT:    store <2 x i32> [[TMP1]], ptr [[A]], align 4
-// CHECK-NEXT:    ret void
-//
-void test_vst1_s32(int32_t *a, int32x2_t b) {
-  vst1_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]], <1 x i64> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr [[TMP4]], align 8
-// CHECK-NEXT:    store <1 x i64> [[TMP1]], ptr [[A]], align 8
-// CHECK-NEXT:    ret void
-//
-void test_vst1_s64(int64_t *a, int64x1_t b) {
-  vst1_s64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]], <4 x half> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x half> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x half>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr [[TMP4]], align 2
-// CHECK-NEXT:    store <4 x half> [[TMP1]], ptr [[A]], align 2
-// CHECK-NEXT:    ret void
-//
-void test_vst1_f16(float16_t *a, float16x4_t b) {
-  vst1_f16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]], <2 x float> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP4]], align 4
-// CHECK-NEXT:    store <2 x float> [[TMP1]], ptr [[A]], align 4
-// CHECK-NEXT:    ret void
-//
-void test_vst1_f32(float32_t *a, float32x2_t b) {
-  vst1_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x double> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <1 x i64> zeroinitializer, ptr [[TMP4]], align 8
-// CHECK-NEXT:    store <1 x double> [[TMP1]], ptr [[A]], align 8
-// CHECK-NEXT:    ret void
-//
-void test_vst1_f64(float64_t *a, float64x1_t b) {
-  vst1_f64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    store <8 x i8> zeroinitializer, ptr [[TMP2]], align 1
-// CHECK-NEXT:    store <8 x i8> [[B]], ptr [[A]], align 1
-// CHECK-NEXT:    ret void
-//
-void test_vst1_p8(poly8_t *a, poly8x8_t b) {
-  vst1_p8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 193514046488576
-// CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
-// CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr [[TMP4]], align 2
-// CHECK-NEXT:    store <4 x i16> [[TMP1]], ptr [[A]], align 2
-// CHECK-NEXT:    ret void
-//
-void test_vst1_p16(poly16_t *a, poly16x4_t b) {
-  vst1_p16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X16X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X16X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
-// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP14]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_u8(uint8_t *a, uint8x16x2_t b) {
-  vst2q_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X8X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X8X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP14]], <8 x i16> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_u16(uint16_t *a, uint16x8x2_t b) {
-  vst2q_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X4X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X4X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[TMP14]], <4 x i32> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_u32(uint32_t *a, uint32x4x2_t b) {
-  vst2q_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X2X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X2X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[TMP14]], <2 x i64> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_u64(uint64_t *a, uint64x2x2_t b) {
-  vst2q_u64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X16X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X16X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
-// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP14]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_s8(int8_t *a, int8x16x2_t b) {
-  vst2q_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X8X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X8X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP14]], <8 x i16> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_s16(int16_t *a, int16x8x2_t b) {
-  vst2q_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X4X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X4X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[TMP14]], <4 x i32> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_s32(int32_t *a, int32x4x2_t b) {
-  vst2q_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X2X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X2X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[TMP14]], <2 x i64> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_s64(int64_t *a, int64x2x2_t b) {
-  vst2q_s64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x half>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X8X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <8 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x half>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x half>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x half>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x half>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x half> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x half>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x half>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8f16.p0(<8 x half> [[TMP14]], <8 x half> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_f16(float16_t *a, float16x8x2_t b) {
-  vst2q_f16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x float>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X4X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X4X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <4 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x float>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x float> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x float>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x float> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x float>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x float>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float> [[TMP14]], <4 x float> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_f32(float32_t *a, float32x4x2_t b) {
-  vst2q_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2f64.p0(<2 x double> [[TMP14]], <2 x double> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_f64(float64_t *a, float64x2x2_t b) {
-  vst2q_f64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X16X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X16X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
-// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP14]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_p8(poly8_t *a, poly8x16x2_t b) {
-  vst2q_p8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2q_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X8X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X8X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[TMP14]], <8 x i16> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2q_p16(poly16_t *a, poly16x8x2_t b) {
-  vst2q_p16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X8X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X8X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
-// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP14]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_u8(uint8_t *a, uint8x8x2_t b) {
-  vst2_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X4X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X4X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP14]], <4 x i16> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_u16(uint16_t *a, uint16x4x2_t b) {
-  vst2_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X2X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X2X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[TMP14]], <2 x i32> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_u32(uint32_t *a, uint32x2x2_t b) {
-  vst2_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X1X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X1X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[TMP14]], <1 x i64> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_u64(uint64_t *a, uint64x1x2_t b) {
-  vst2_u64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X8X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X8X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
-// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP14]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_s8(int8_t *a, int8x8x2_t b) {
-  vst2_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X4X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X4X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP14]], <4 x i16> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_s16(int16_t *a, int16x4x2_t b) {
-  vst2_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X2X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X2X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[TMP14]], <2 x i32> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_s32(int32_t *a, int32x2x2_t b) {
-  vst2_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X1X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X1X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[TMP14]], <1 x i64> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_s64(int64_t *a, int64x1x2_t b) {
-  vst2_s64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x half>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X4X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <4 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x half>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x half>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x half> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x half>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x half>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x half> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x half>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x half>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4f16.p0(<4 x half> [[TMP14]], <4 x half> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_f16(float16_t *a, float16x4x2_t b) {
-  vst2_f16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x float>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X2X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X2X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <2 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x float>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x float>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x float> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x float>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x float>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x float> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x float>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x float>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2f32.p0(<2 x float> [[TMP14]], <2 x float> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_f32(float32_t *a, float32x2x2_t b) {
-  vst2_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v1f64.p0(<1 x double> [[TMP14]], <1 x double> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_f64(float64_t *a, float64x1x2_t b) {
-  vst2_f64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X8X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X8X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
-// CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-// CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[TMP14]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_p8(poly8_t *a, poly8x8x2_t b) {
-  vst2_p8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst2_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X4X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X4X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
-// CHECK-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 193514046488576
-// CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
-// CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP18]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[TMP14]], <4 x i16> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst2_p16(poly16_t *a, poly16x4x2_t b) {
-  vst2_p16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X16X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X16X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
-// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
-// CHECK-NEXT:    store <48 x i8> zeroinitializer, ptr [[TMP15]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_u8(uint8_t *a, uint8x16x3_t b) {
-  vst3q_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X8X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X8X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <24 x i16> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP16]], <8 x i16> [[TMP17]], <8 x i16> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_u16(uint16_t *a, uint16x8x3_t b) {
-  vst3q_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X4X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X4X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i32> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x i32>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <12 x i32> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[TMP16]], <4 x i32> [[TMP17]], <4 x i32> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_u32(uint32_t *a, uint32x4x3_t b) {
-  vst3q_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X2X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X2X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <6 x i64> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[TMP16]], <2 x i64> [[TMP17]], <2 x i64> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_u64(uint64_t *a, uint64x2x3_t b) {
-  vst3q_u64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X16X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X16X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
-// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
-// CHECK-NEXT:    store <48 x i8> zeroinitializer, ptr [[TMP15]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_s8(int8_t *a, int8x16x3_t b) {
-  vst3q_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X8X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X8X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <24 x i16> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP16]], <8 x i16> [[TMP17]], <8 x i16> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_s16(int16_t *a, int16x8x3_t b) {
-  vst3q_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X4X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X4X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i32> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x i32>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <12 x i32> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[TMP16]], <4 x i32> [[TMP17]], <4 x i32> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_s32(int32_t *a, int32x4x3_t b) {
-  vst3q_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X2X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X2X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <6 x i64> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[TMP16]], <2 x i64> [[TMP17]], <2 x i64> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_s64(int64_t *a, int64x2x3_t b) {
-  vst3q_s64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x half>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X8X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X8X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <8 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x half>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x half>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x half>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x half>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x half> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x half>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x half>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x half> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x half>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x half>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x half>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <24 x i16> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8f16.p0(<8 x half> [[TMP16]], <8 x half> [[TMP17]], <8 x half> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_f16(float16_t *a, float16x8x3_t b) {
-  vst3q_f16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x float>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X4X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X4X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <4 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x float>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x float> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x float>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x float> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x float>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x float>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x float> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x float>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x float>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x float>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <12 x i32> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4f32.p0(<4 x float> [[TMP16]], <4 x float> [[TMP17]], <4 x float> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_f32(float32_t *a, float32x4x3_t b) {
-  vst3q_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x double> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x double>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <6 x i64> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2f64.p0(<2 x double> [[TMP16]], <2 x double> [[TMP17]], <2 x double> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_f64(float64_t *a, float64x2x3_t b) {
-  vst3q_f64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X16X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X16X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
-// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
-// CHECK-NEXT:    store <48 x i8> zeroinitializer, ptr [[TMP15]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_p8(poly8_t *a, poly8x16x3_t b) {
-  vst3q_p8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3q_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X8X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X8X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <24 x i16> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[TMP16]], <8 x i16> [[TMP17]], <8 x i16> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3q_p16(poly16_t *a, poly16x8x3_t b) {
-  vst3q_p16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X8X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X8X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
-// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
-// CHECK-NEXT:    store <24 x i8> zeroinitializer, ptr [[TMP15]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_u8(uint8_t *a, uint8x8x3_t b) {
-  vst3_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X4X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X4X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <12 x i16> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP16]], <4 x i16> [[TMP17]], <4 x i16> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_u16(uint16_t *a, uint16x4x3_t b) {
-  vst3_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X2X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X2X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i32> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x i32>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <6 x i32> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[TMP16]], <2 x i32> [[TMP17]], <2 x i32> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_u32(uint32_t *a, uint32x2x3_t b) {
-  vst3_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X1X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X1X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <3 x i64> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[TMP16]], <1 x i64> [[TMP17]], <1 x i64> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_u64(uint64_t *a, uint64x1x3_t b) {
-  vst3_u64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X8X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X8X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
-// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
-// CHECK-NEXT:    store <24 x i8> zeroinitializer, ptr [[TMP15]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_s8(int8_t *a, int8x8x3_t b) {
-  vst3_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X4X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X4X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <12 x i16> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP16]], <4 x i16> [[TMP17]], <4 x i16> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_s16(int16_t *a, int16x4x3_t b) {
-  vst3_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X2X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X2X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i32> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x i32>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <6 x i32> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[TMP16]], <2 x i32> [[TMP17]], <2 x i32> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_s32(int32_t *a, int32x2x3_t b) {
-  vst3_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X1X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X1X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <3 x i64> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[TMP16]], <1 x i64> [[TMP17]], <1 x i64> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_s64(int64_t *a, int64x1x3_t b) {
-  vst3_s64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x half>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X4X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X4X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <4 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x half>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x half>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x half> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x half>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x half>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x half> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x half>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x half>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x half> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x half>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x half>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x half>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <12 x i16> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4f16.p0(<4 x half> [[TMP16]], <4 x half> [[TMP17]], <4 x half> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_f16(float16_t *a, float16x4x3_t b) {
-  vst3_f16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x float>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X2X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X2X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <2 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x float>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x float>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x float> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x float>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x float>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x float> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x float>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x float>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x float> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x float>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x float>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x float>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <6 x i32> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2f32.p0(<2 x float> [[TMP16]], <2 x float> [[TMP17]], <2 x float> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_f32(float32_t *a, float32x2x3_t b) {
-  vst3_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x double> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x double>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <3 x i64> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v1f64.p0(<1 x double> [[TMP16]], <1 x double> [[TMP17]], <1 x double> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_f64(float64_t *a, float64x1x3_t b) {
-  vst3_f64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X8X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X8X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 193514046488576
-// CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
-// CHECK-NEXT:    store <24 x i8> zeroinitializer, ptr [[TMP15]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_p8(poly8_t *a, poly8x8x3_t b) {
-  vst3_p8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst3_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X4X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X4X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 193514046488576
-// CHECK-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
-// CHECK-NEXT:    store <12 x i16> zeroinitializer, ptr [[TMP21]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[TMP16]], <4 x i16> [[TMP17]], <4 x i16> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst3_p16(poly16_t *a, poly16x4x3_t b) {
-  vst3_p16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X16X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X16X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP13:%.*]] = load <16 x i8>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
-// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-// CHECK-NEXT:    store <64 x i8> zeroinitializer, ptr [[TMP16]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP13]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_u8(uint8_t *a, uint8x16x4_t b) {
-  vst4q_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X8X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X8X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <8 x i16>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i16> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <8 x i16>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <32 x i16> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP18]], <8 x i16> [[TMP19]], <8 x i16> [[TMP20]], <8 x i16> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_u16(uint16_t *a, uint16x8x4_t b) {
-  vst4q_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X4X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X4X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i32> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x i32>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x i32> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x i32>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <4 x i32>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[TMP18]], <4 x i32> [[TMP19]], <4 x i32> [[TMP20]], <4 x i32> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_u32(uint32_t *a, uint32x4x4_t b) {
-  vst4q_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X2X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X2X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x i64>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x i64> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <2 x i64>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <8 x i64> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[TMP18]], <2 x i64> [[TMP19]], <2 x i64> [[TMP20]], <2 x i64> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_u64(uint64_t *a, uint64x2x4_t b) {
-  vst4q_u64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X16X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X16X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP13:%.*]] = load <16 x i8>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
-// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-// CHECK-NEXT:    store <64 x i8> zeroinitializer, ptr [[TMP16]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP13]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_s8(int8_t *a, int8x16x4_t b) {
-  vst4q_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X8X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X8X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <8 x i16>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i16> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <8 x i16>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <32 x i16> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP18]], <8 x i16> [[TMP19]], <8 x i16> [[TMP20]], <8 x i16> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_s16(int16_t *a, int16x8x4_t b) {
-  vst4q_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x i32>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X4X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X4X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <4 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i32>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i32> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i32>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i32> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i32> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i32>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x i32>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x i32> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x i32>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x i32>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x i32>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <4 x i32>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[TMP18]], <4 x i32> [[TMP19]], <4 x i32> [[TMP20]], <4 x i32> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_s32(int32_t *a, int32x4x4_t b) {
-  vst4q_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X2X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X2X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x i64>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x i64> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <2 x i64>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <8 x i64> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[TMP18]], <2 x i64> [[TMP19]], <2 x i64> [[TMP20]], <2 x i64> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_s64(int64_t *a, int64x2x4_t b) {
-  vst4q_s64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x half>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X8X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X8X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <8 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x half>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x half> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x half>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x half> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x half>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x half> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x half>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <8 x half>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x half> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x half>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x half>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x half>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <8 x half>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <32 x i16> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8f16.p0(<8 x half> [[TMP18]], <8 x half> [[TMP19]], <8 x half> [[TMP20]], <8 x half> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_f16(float16_t *a, float16x8x4_t b) {
-  vst4q_f16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x float>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X4X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X4X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <4 x i32>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <4 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x float> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x float>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x float> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x float>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x float> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x float>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x float> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <4 x float>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <4 x float>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <4 x float>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <4 x float>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4f32.p0(<4 x float> [[TMP18]], <4 x float> [[TMP19]], <4 x float> [[TMP20]], <4 x float> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_f32(float32_t *a, float32x4x4_t b) {
-  vst4q_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x double> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x double>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x double> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x double>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <2 x double>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <8 x i64> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2f64.p0(<2 x double> [[TMP18]], <2 x double> [[TMP19]], <2 x double> [[TMP20]], <2 x double> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_f64(float64_t *a, float64x2x4_t b) {
-  vst4q_f64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <16 x i8>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X16X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X16X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <16 x i8>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <16 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <16 x i8>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i8>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i8>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY8X16X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <16 x i8>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP13:%.*]] = load <16 x i8>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
-// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-// CHECK-NEXT:    store <64 x i8> zeroinitializer, ptr [[TMP16]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[TMP10]], <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <16 x i8> [[TMP13]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_p8(poly8_t *a, poly8x16x4_t b) {
-  vst4q_p8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4q_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i16>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X8X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X8X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <8 x i16>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <8 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i16>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <8 x i16> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i16>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i16> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <8 x i16>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i16> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY16X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i16>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <8 x i16>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i16> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <8 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <8 x i16>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <8 x i16>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <8 x i16>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <32 x i16> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[TMP18]], <8 x i16> [[TMP19]], <8 x i16> [[TMP20]], <8 x i16> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4q_p16(poly16_t *a, poly16x8x4_t b) {
-  vst4q_p16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_u8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT8X8X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT8X8X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i8>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
-// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP16]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], <8 x i8> [[TMP13]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_u8(uint8_t *a, uint8x8x4_t b) {
-  vst4_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_u16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT16X4X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT16X4X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x i16>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x i16> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <4 x i16>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP18]], <4 x i16> [[TMP19]], <4 x i16> [[TMP20]], <4 x i16> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_u16(uint16_t *a, uint16x4x4_t b) {
-  vst4_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_u32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT32X2X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT32X2X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i32> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x i32>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x i32> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x i32>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <2 x i32>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[TMP18]], <2 x i32> [[TMP19]], <2 x i32> [[TMP20]], <2 x i32> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_u32(uint32_t *a, uint32x2x4_t b) {
-  vst4_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_u64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_UINT64X1X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_UINT64X1X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_UINT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <1 x i64>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <1 x i64> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <1 x i64>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[TMP18]], <1 x i64> [[TMP19]], <1 x i64> [[TMP20]], <1 x i64> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_u64(uint64_t *a, uint64x1x4_t b) {
-  vst4_u64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_s8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT8X8X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT8X8X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i8>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
-// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP16]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], <8 x i8> [[TMP13]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_s8(int8_t *a, int8x8x4_t b) {
-  vst4_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_s16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT16X4X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT16X4X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x i16>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x i16> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <4 x i16>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP18]], <4 x i16> [[TMP19]], <4 x i16> [[TMP20]], <4 x i16> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_s16(int16_t *a, int16x4x4_t b) {
-  vst4_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_s32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x i32>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT32X2X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT32X2X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <2 x i32>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i32> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i32>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i32> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i32>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i32> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i32>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x i32>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x i32> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x i32>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x i32>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x i32>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <2 x i32>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[TMP18]], <2 x i32> [[TMP19]], <2 x i32> [[TMP20]], <2 x i32> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_s32(int32_t *a, int32x2x4_t b) {
-  vst4_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_s64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_INT64X1X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_INT64X1X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_INT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <1 x i64>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <1 x i64> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <1 x i64>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[TMP18]], <1 x i64> [[TMP19]], <1 x i64> [[TMP20]], <1 x i64> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_s64(int64_t *a, int64x1x4_t b) {
-  vst4_s64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_f16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x half>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT16X4X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT16X4X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <4 x half>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x half>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x half> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x half>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x half> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x half>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x half> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x half>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x half>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x half> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x half>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x half>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x half>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <4 x half>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4f16.p0(<4 x half> [[TMP18]], <4 x half> [[TMP19]], <4 x half> [[TMP20]], <4 x half> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_f16(float16_t *a, float16x4x4_t b) {
-  vst4_f16(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_f32(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x float>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT32X2X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT32X2X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <2 x i32>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <2 x float>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x float>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x float> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x float>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x float> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x float>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x float> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT32X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x float>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x float>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x float> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <2 x float>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <2 x float>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <2 x float>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <2 x float>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2f32.p0(<2 x float> [[TMP18]], <2 x float> [[TMP19]], <2 x float> [[TMP20]], <2 x float> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_f32(float32_t *a, float32x2x4_t b) {
-  vst4_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_f64(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x double> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <1 x double>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <1 x double> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x double>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <1 x double>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v1f64.p0(<1 x double> [[TMP18]], <1 x double> [[TMP19]], <1 x double> [[TMP20]], <1 x double> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_f64(float64_t *a, float64x1x4_t b) {
-  vst4_f64(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_p8(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <8 x i8>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY8X8X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY8X8X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <8 x i8>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <8 x i8>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <8 x i8>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP11:%.*]] = load <8 x i8>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP12:%.*]] = load <8 x i8>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY8X8X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <8 x i8>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i8>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
-// CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-// CHECK-NEXT:    store <32 x i8> zeroinitializer, ptr [[TMP16]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[TMP10]], <8 x i8> [[TMP11]], <8 x i8> [[TMP12]], <8 x i8> [[TMP13]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_p8(poly8_t *a, poly8x8x4_t b) {
-  vst4_p8(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst4_p16(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <4 x i16>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY16X4X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY16X4X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <4 x i16>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <4 x i16>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i16>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <4 x i16> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i16>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <4 x i16> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i16>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <4 x i16> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY16X4X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <4 x i16>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <4 x i16>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x i16> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <4 x i16>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <4 x i16>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <4 x i16>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <4 x i16>
-// CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[A]] to i64
-// CHECK-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 193514046488576
-// CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
-// CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr [[TMP24]], align 1
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[TMP18]], <4 x i16> [[TMP19]], <4 x i16> [[TMP20]], <4 x i16> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst4_p16(poly16_t *a, poly16x4x4_t b) {
-  vst4_p16(a, b);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x2x2_t @test_vld1q_f64_x2(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x double>, <2 x double> } [[VLD1XN]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X2_T]] [[TMP10]]
-//
-float64x2x2_t test_vld1q_f64_x2(float64_t const *a) {
-  return vld1q_f64_x2(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly64x2x2_t @test_vld1q_p64_x2(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X2X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X2X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64> } [[VLD1XN]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X2X2_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [2 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY64X2X2_T]] [[TMP10]]
-//
-poly64x2x2_t test_vld1q_p64_x2(poly64_t const *a) {
-  return vld1q_p64_x2(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x1x2_t @test_vld1_f64_x2(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x double>, <1 x double> } [[VLD1XN]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X2_T]] [[TMP10]]
-//
-float64x1x2_t test_vld1_f64_x2(float64_t const *a) {
-  return vld1_f64_x2(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly64x1x2_t @test_vld1_p64_x2(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X1X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X1X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64> } [[VLD1XN]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 16)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X1X2_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [2 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY64X1X2_T]] [[TMP10]]
-//
-poly64x1x2_t test_vld1_p64_x2(poly64_t const *a) {
-  return vld1_p64_x2(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x2x3_t @test_vld1q_f64_x3(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x double>, <2 x double>, <2 x double> } [[VLD1XN]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X3_T]] [[TMP10]]
-//
-float64x2x3_t test_vld1q_f64_x3(float64_t const *a) {
-  return vld1q_f64_x3(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly64x2x3_t @test_vld1q_p64_x3(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X2X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X2X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64> } [[VLD1XN]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 48)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X2X3_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [3 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY64X2X3_T]] [[TMP10]]
-//
-poly64x2x3_t test_vld1q_p64_x3(poly64_t const *a) {
-  return vld1q_p64_x3(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x1x3_t @test_vld1_f64_x3(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x double>, <1 x double>, <1 x double> } [[VLD1XN]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X3_T]] [[TMP10]]
-//
-float64x1x3_t test_vld1_f64_x3(float64_t const *a) {
-  return vld1_f64_x3(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly64x1x3_t @test_vld1_p64_x3(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X1X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X1X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64> } [[VLD1XN]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 24)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X1X3_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [3 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY64X1X3_T]] [[TMP10]]
-//
-poly64x1x3_t test_vld1_p64_x3(poly64_t const *a) {
-  return vld1_p64_x3(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x2x4_t @test_vld1q_f64_x4(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x double>, <2 x double>, <2 x double>, <2 x double> } [[VLD1XN]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X2X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X2X4_T]] [[TMP10]]
-//
-float64x2x4_t test_vld1q_f64_x4(float64_t const *a) {
-  return vld1q_f64_x4(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly64x2x4_t @test_vld1q_p64_x4(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X2X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X2X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[VLD1XN]], ptr [[__RET]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 64)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X2X4_T]], ptr [[RETVAL]], align 16
-// CHECK-NEXT:    store { [4 x <2 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY64X2X4_T]] [[TMP10]]
-//
-poly64x2x4_t test_vld1q_p64_x4(poly64_t const *a) {
-  return vld1q_p64_x4(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.float64x1x4_t @test_vld1_f64_x4(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x double>, <1 x double>, <1 x double>, <1 x double> } [[VLD1XN]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_FLOAT64X1X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_FLOAT64X1X4_T]] [[TMP10]]
-//
-float64x1x4_t test_vld1_f64_x4(float64_t const *a) {
-  return vld1_f64_x4(a);
-}
-
-// CHECK-LABEL: define dso_local %struct.poly64x1x4_t @test_vld1_p64_x4(
-// CHECK-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_POLY64X1X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[RETVAL]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__RET:%.*]] = alloca [[STRUCT_POLY64X1X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[VLD1XN:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0(ptr [[A]])
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[__RET]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } [[VLD1XN]], ptr [[__RET]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[RETVAL]], ptr [[__RET]], i64 32)
-// CHECK-NEXT:    [[TMP10:%.*]] = load [[STRUCT_POLY64X1X4_T]], ptr [[RETVAL]], align 8
-// CHECK-NEXT:    store { [4 x <1 x i64>] } zeroinitializer, ptr @__msan_retval_tls, align 8
-// CHECK-NEXT:    ret [[STRUCT_POLY64X1X4_T]] [[TMP10]]
-//
-poly64x1x4_t test_vld1_p64_x4(poly64_t const *a) {
-  return vld1_p64_x4(a);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_f64_x2(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x2.v2f64.p0(<2 x double> [[TMP14]], <2 x double> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_f64_x2(float64_t *a, float64x2x2_t b) {
-  vst1q_f64_x2(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_p64_x2(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X2X2_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X2X2_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [2 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> [[TMP14]], <2 x i64> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_p64_x2(poly64_t *a, poly64x2x2_t b) {
-  vst1q_p64_x2(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_f64_x2(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x2.v1f64.p0(<1 x double> [[TMP14]], <1 x double> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1_f64_x2(float64_t *a, float64x1x2_t b) {
-  vst1_f64_x2(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_p64_x2(
-// CHECK-SAME: ptr noundef [[A:%.*]], [2 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X1X2_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X1X2_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 16, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X2_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [2 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [2 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 16)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X2_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> [[TMP14]], <1 x i64> [[TMP15]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1_p64_x2(poly64_t *a, poly64x1x2_t b) {
-  vst1_p64_x2(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_f64_x3(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x double>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x double> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x double>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x3.v2f64.p0(<2 x double> [[TMP16]], <2 x double> [[TMP17]], <2 x double> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_f64_x3(float64_t *a, float64x2x3_t b) {
-  vst1q_f64_x3(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_p64_x3(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X2X3_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X2X3_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 48, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [3 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 48)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> [[TMP16]], <2 x i64> [[TMP17]], <2 x i64> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_p64_x3(poly64_t *a, poly64x2x3_t b) {
-  vst1q_p64_x3(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_f64_x3(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x double>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x double> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x double>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x3.v1f64.p0(<1 x double> [[TMP16]], <1 x double> [[TMP17]], <1 x double> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1_f64_x3(float64_t *a, float64x1x3_t b) {
-  vst1_f64_x3(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_p64_x3(
-// CHECK-SAME: ptr noundef [[A:%.*]], [3 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X1X3_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X1X3_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 24, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X3_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [3 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [3 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 24)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X3_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [3 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> [[TMP16]], <1 x i64> [[TMP17]], <1 x i64> [[TMP18]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1_p64_x3(poly64_t *a, poly64x1x3_t b) {
-  vst1_p64_x3(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_f64_x4(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x double>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X2X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <2 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x double> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x double>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x double> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x double>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x double> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x double>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x double>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x double> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x double>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x double>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x double>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <2 x double>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x4.v2f64.p0(<2 x double> [[TMP18]], <2 x double> [[TMP19]], <2 x double> [[TMP20]], <2 x double> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_f64_x4(float64_t *a, float64x2x4_t b) {
-  vst1q_f64_x4(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1q_p64_x4(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <2 x i64>] alignstack(16) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X2X4_T:%.*]], align 16
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP2]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X2X4_T]], align 16
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP5]], i8 0, i64 64, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <2 x i64>] zeroinitializer, ptr [[TMP8]], align 16
-// CHECK-NEXT:    store [4 x <2 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 16
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 64)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <2 x i64>, ptr [[ARRAYIDX]], align 16
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <2 x i64> [[TMP10]] to <16 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <2 x i64>, ptr [[ARRAYIDX2]], align 16
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP12]] to <16 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <2 x i64>, ptr [[ARRAYIDX4]], align 16
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP14]] to <16 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY64X2X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <2 x i64>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <2 x i64>, ptr [[ARRAYIDX6]], align 16
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <2 x i64> [[TMP16]] to <16 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP11]] to <2 x i64>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <16 x i8> [[TMP13]] to <2 x i64>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP15]] to <2 x i64>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP17]] to <2 x i64>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> [[TMP18]], <2 x i64> [[TMP19]], <2 x i64> [[TMP20]], <2 x i64> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1q_p64_x4(poly64_t *a, poly64x2x4_t b) {
-  vst1q_p64_x4(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_f64_x4(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <1 x double>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_FLOAT64X1X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <1 x double>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x double>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x double> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x double>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x double> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x double>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x double> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_FLOAT64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x double>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <1 x double>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <1 x double> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x double>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x double>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x double>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <1 x double>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x4.v1f64.p0(<1 x double> [[TMP18]], <1 x double> [[TMP19]], <1 x double> [[TMP20]], <1 x double> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1_f64_x4(float64_t *a, float64x1x4_t b) {
-  vst1_f64_x4(a, b);
-}
-
-// CHECK-LABEL: define dso_local void @test_vst1_p64_x4(
-// CHECK-SAME: ptr noundef [[A:%.*]], [4 x <1 x i64>] alignstack(8) [[B_COERCE:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    call void @llvm.donothing()
-// CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_POLY64X1X4_T:%.*]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[B]] to i64
-// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 193514046488576
-// CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[__S1:%.*]] = alloca [[STRUCT_POLY64X1X4_T]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[__S1]] to i64
-// CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
-// CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-// CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 32, i1 false)
-// CHECK-NEXT:    [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X4_T]], ptr [[B]], i32 0, i32 0
-// CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[COERCE_DIVE]] to i64
-// CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
-// CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-// CHECK-NEXT:    store [4 x <1 x i64>] zeroinitializer, ptr [[TMP8]], align 8
-// CHECK-NEXT:    store [4 x <1 x i64>] [[B_COERCE]], ptr [[COERCE_DIVE]], align 8
-// CHECK-NEXT:    [[TMP9:%.*]] = call ptr @__msan_memcpy(ptr [[__S1]], ptr [[B]], i64 32)
-// CHECK-NEXT:    [[VAL:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP10:%.*]] = load <1 x i64>, ptr [[ARRAYIDX]], align 8
-// CHECK-NEXT:    [[TMP11:%.*]] = bitcast <1 x i64> [[TMP10]] to <8 x i8>
-// CHECK-NEXT:    [[VAL1:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL1]], i64 0, i64 1
-// CHECK-NEXT:    [[TMP12:%.*]] = load <1 x i64>, ptr [[ARRAYIDX2]], align 8
-// CHECK-NEXT:    [[TMP13:%.*]] = bitcast <1 x i64> [[TMP12]] to <8 x i8>
-// CHECK-NEXT:    [[VAL3:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL3]], i64 0, i64 2
-// CHECK-NEXT:    [[TMP14:%.*]] = load <1 x i64>, ptr [[ARRAYIDX4]], align 8
-// CHECK-NEXT:    [[TMP15:%.*]] = bitcast <1 x i64> [[TMP14]] to <8 x i8>
-// CHECK-NEXT:    [[VAL5:%.*]] = getelementptr inbounds [[STRUCT_POLY64X1X4_T]], ptr [[__S1]], i32 0, i32 0
-// CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds [4 x <1 x i64>], ptr [[VAL5]], i64 0, i64 3
-// CHECK-NEXT:    [[TMP16:%.*]] = load <1 x i64>, ptr [[ARRAYIDX6]], align 8
-// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <1 x i64> [[TMP16]] to <8 x i8>
-// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP11]] to <1 x i64>
-// CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP13]] to <1 x i64>
-// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP15]] to <1 x i64>
-// CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP17]] to <1 x i64>
-// CHECK-NEXT:    call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> [[TMP18]], <1 x i64> [[TMP19]], <1 x i64> [[TMP20]], <1 x i64> [[TMP21]], ptr [[A]])
-// CHECK-NEXT:    ret void
-//
-void test_vst1_p64_x4(poly64_t *a, poly64x1x4_t b) {
-  vst1_p64_x4(a, b);
-}
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index f3f628333dec8..c83e24323413e 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3903,6 +3903,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return IRB.CreateShuffleVector(left, right, ConstantVector::get(Idxs));
   }
 
+  Value *getShadowOrOrigin (Instruction* I, int i, bool shadowMode) {
+    if (shadowMode)
+      return getShadow (I, i);
+    else
+      return getOrigin (I, i);
+  }
+
   Value *interleaveShadowOrOrigin(IRBuilder<> &IRB, IntrinsicInst &I,
                                   bool shadowMode) {
     // Call arguments only
@@ -3925,9 +3932,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
     uint16_t Width =
         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
-    if (!shadowMode) {
-      Width = Width / 4; // One origin value per 32-bits of app memory
-    }
+//    Width = Width / 4; // One origin value per 32-bits of app memory
+
     uint16_t ElemSize = cast<FixedVectorType>(I.getArgOperand(0)->getType())
                             ->getElementType()
                             ->getPrimitiveSizeInBits();
@@ -3941,18 +3947,23 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
     Value *interleaved = nullptr;
     if (numVectors == 1) {
-      interleaved = getShadow(&I, 0);
+      interleaved = getShadowOrOrigin(&I, 0, shadowMode);
     } else if (numVectors == 2) {
       interleaved =
-          interleaveAB(IRB, getShadow(&I, 0), getShadow(&I, 1), Width);
+          interleaveAB(IRB, getShadowOrOrigin(&I, 0, shadowMode),
+                            getShadowOrOrigin(&I, 1, shadowMode), Width);
     } else if (numVectors == 3) {
-      Value *UndefV = UndefValue::get(getShadow(&I, 0)->getType());
-      Value *AB = interleaveAB(IRB, getShadow(&I, 0), getShadow(&I, 1), Width);
-      Value *Cx = interleaveAB(IRB, getShadow(&I, 2), UndefV, Width);
+      Value *UndefV = UndefValue::get(getShadowOrOrigin(&I, 0, shadowMode)->getType());
+      Value *AB = interleaveAB(IRB, getShadowOrOrigin(&I, 0, shadowMode),
+                                    getShadowOrOrigin(&I, 1, shadowMode),
+                                    Width);
+      Value *Cx = interleaveAB(IRB, getShadowOrOrigin(&I, 2, shadowMode), UndefV, Width);
       interleaved = interleaveABCx(IRB, AB, Cx, Width);
     } else if (numVectors == 4) {
-      Value *AB = interleaveAB(IRB, getShadow(&I, 0), getShadow(&I, 1), Width);
-      Value *CD = interleaveAB(IRB, getShadow(&I, 2), getShadow(&I, 3), Width);
+      Value *AB = interleaveAB(IRB, getShadowOrOrigin(&I, 0, shadowMode),
+                                    getShadowOrOrigin(&I, 1, shadowMode), Width);
+      Value *CD = interleaveAB(IRB, getShadowOrOrigin(&I, 2, shadowMode),
+                                    getShadowOrOrigin(&I, 3, shadowMode), Width);
       interleaved = interleaveAB(IRB, AB, CD, Width * 2);
     } else {
       //          assert(! "Unexpected number of vectors");
@@ -3980,17 +3991,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
         Addr, IRB, interleavedShadow->getType(), Align(1), /*isStore*/ true);
     IRB.CreateAlignedStore(interleavedShadow, ShadowPtr, Align(1));
+//    setShadow (&I, interleavedShadow);
 
     if (MS.TrackOrigins) {
-      setOrigin(&I, getCleanOrigin());
+//      setOrigin(&I, getCleanOrigin());
 
-      /*
-            errs() << "Inserting origin information ...\n";
-            Value *interleavedOrigin = interleaveShadowOrOrigin (IRB, I, false);
+        errs() << "Inserting origin information ...\n";
+        Value *interleavedOrigin = interleaveShadowOrOrigin (IRB, I, false);
 
-            errs() << "Adding store for origin ...\n";
-            IRB.CreateAlignedStore(interleavedOrigin, OriginPtr, Align(1));
-      */
+        errs() << "Adding store for origin ...\n";
+        IRB.CreateAlignedStore(interleavedOrigin, OriginPtr, Align(1));
 
       //      setOriginForNaryIntrinsic(I, true);
     }
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
index 1c4ca47b60c13..a770bda3519da 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
@@ -1,7 +1,7 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
 ; Test memory sanitizer instrumentation for Arm NEON VST instructions.
 ;
-; RUN: opt < %s -passes=msan -S | FileCheck %s
+; RUN: opt < %s -passes=msan -msan-track-origins=2 -S | FileCheck %s
 ;
 ; Forked from llvm/test/CodeGen/AArch64/arm64-st1.ll
 
@@ -14,21 +14,26 @@ define void @st2_8b(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_memory {
 ; CHECK-LABEL: define void @st2_8b
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i8> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0:![0-9]+]]
-; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
-; CHECK-NEXT:    unreachable
-; CHECK:       7:
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
+; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT:    store <16 x i8> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK:       14:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4:[0-9]+]]
+; CHECK-NEXT:    unreachable
+; CHECK:       15:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -40,9 +45,24 @@ define void @st2_8b_undefA(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m
 ; CHECK-LABEL: define void @st2_8b_undefA
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[TMP7]], 35184372088832
+; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store <16 x i8> [[TMP5]], ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -54,9 +74,24 @@ define void @st2_8b_undefB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m
 ; CHECK-LABEL: define void @st2_8b_undefB
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[TMP7]], 35184372088832
+; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store <16 x i8> [[TMP5]], ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -68,8 +103,21 @@ define void @st2_8b_undefAB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_
 ; CHECK-LABEL: define void @st2_8b_undefAB
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
+; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    store <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       9:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       10:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -81,25 +129,30 @@ define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sani
 ; CHECK-LABEL: define void @st3_8b
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
-; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i8> [[TMP5]], <8 x i8> undef, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <24 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       9:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -111,10 +164,28 @@ define void @st3_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 ; CHECK-LABEL: define void @st3_8b_undefA
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <8 x i8> [[TMP3]], <8 x i8> undef, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP8]], <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <24 x i8> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       17:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -126,10 +197,28 @@ define void @st3_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 ; CHECK-LABEL: define void @st3_8b_undefB
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <8 x i8> [[TMP3]], <8 x i8> undef, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP8]], <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <24 x i8> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       17:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -141,15 +230,27 @@ define void @st3_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 ; CHECK-LABEL: define void @st3_8b_undefC
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i8> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> <i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef>, <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 35184372088832
+; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    store <24 x i8> [[TMP8]], ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       15:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       16:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -161,9 +262,25 @@ define void @st3_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 ; CHECK-LABEL: define void @st3_8b_undefAB
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> undef, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> [[TMP5]], <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <24 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -175,9 +292,25 @@ define void @st3_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 ; CHECK-LABEL: define void @st3_8b_undefAC
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> <i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef>, <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <24 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -189,9 +322,25 @@ define void @st3_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 ; CHECK-LABEL: define void @st3_8b_undefBC
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> <i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef>, <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <24 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -203,8 +352,21 @@ define void @st3_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) noun
 ; CHECK-LABEL: define void @st3_8b_undefABC
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
+; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    store <24 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       9:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       10:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -216,29 +378,32 @@ define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P)
 ; CHECK-LABEL: define void @st4_8b
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP6]], 0
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <8 x i8> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP8]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <8 x i8> [[TMP4]] to i64
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    [[_MSCMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR7:%.*]] = or i1 [[_MSOR5]], [[_MSCMP6]]
-; CHECK-NEXT:    br i1 [[_MSOR7]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
-; CHECK:       10:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <8 x i8> [[TMP5]], <8 x i8> [[TMP7]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <16 x i8> [[TMP11]], <16 x i8> [[TMP12]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
+; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
+; CHECK:       20:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       11:
+; CHECK:       21:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -250,11 +415,30 @@ define void @st4_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-LABEL: define void @st4_8b_undefA
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i8> [[TMP3]], <8 x i8> [[TMP5]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -266,11 +450,30 @@ define void @st4_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-LABEL: define void @st4_8b_undefB
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i8> [[TMP3]], <8 x i8> [[TMP5]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -282,16 +485,30 @@ define void @st4_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-LABEL: define void @st4_8b_undefC
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP5]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -303,19 +520,30 @@ define void @st4_8b_undefD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-LABEL: define void @st4_8b_undefD
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i8> [[TMP5]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> [[C]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -327,10 +555,27 @@ define void @st4_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefAB
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> [[TMP7]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 35184372088832
+; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP8]], ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       15:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       16:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -342,10 +587,28 @@ define void @st4_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefAC
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP8]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       17:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -357,10 +620,28 @@ define void @st4_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefBC
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP8]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       17:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -372,10 +653,28 @@ define void @st4_8b_undefBD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefBD
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <8 x i8> [[TMP3]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP8]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       17:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> [[C]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -387,9 +686,25 @@ define void @st4_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefABC
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> [[TMP5]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -401,9 +716,25 @@ define void @st4_8b_undefABD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefABD
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> [[TMP5]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> [[C]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -415,9 +746,25 @@ define void @st4_8b_undefACD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefACD
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -429,9 +776,25 @@ define void @st4_8b_undefBCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefBCD
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -443,8 +806,21 @@ define void @st4_8b_undefABCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D
 ; CHECK-LABEL: define void @st4_8b_undefABCD
 ; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
+; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    store <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       9:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       10:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -462,21 +838,26 @@ define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind sanitize_memor
 ; CHECK-LABEL: define void @st2_16b
 ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
-; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       7:
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP3]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
+; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT:    store <32 x i8> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       14:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       15:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[A]], <16 x i8> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -488,25 +869,30 @@ define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind
 ; CHECK-LABEL: define void @st3_16b
 ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
-; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP3]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> undef, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <32 x i8> [[TMP9]], <32 x i8> [[TMP10]], <48 x i32> <i32 0, i32 1, i32 16, i32 2, i32 3, i32 18, i32 4, i32 5, i32 20, i32 6, i32 7, i32 22, i32 8, i32 9, i32 24, i32 10, i32 11, i32 26, i32 12, i32 13, i32 28, i32 14, i32 15, i32 30, i32 16, i32 17, i32 32, i32 18, i32 19, i32 34, i32 20, i32 21, i32 36, i32 22, i32 23, i32 38, i32 24, i32 25, i32 40, i32 26, i32 27, i32 42, i32 28, i32 29, i32 44, i32 30, i32 31, i32 46>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <48 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       9:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[A]], <16 x i8> [[B]], <16 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -518,29 +904,32 @@ define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr
 ; CHECK-LABEL: define void @st4_16b
 ; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP6]], 0
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i128 [[TMP8]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <16 x i8> [[TMP4]] to i128
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i128 [[TMP9]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    [[_MSCMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR7:%.*]] = or i1 [[_MSOR5]], [[_MSCMP6]]
-; CHECK-NEXT:    br i1 [[_MSOR7]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
-; CHECK:       10:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP3]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> [[TMP7]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <32 x i8> [[TMP11]], <32 x i8> [[TMP12]], <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
+; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    store <64 x i8> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
+; CHECK:       20:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       11:
+; CHECK:       21:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[A]], <16 x i8> [[B]], <16 x i8> [[C]], <16 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -558,21 +947,26 @@ define void @st2_4h(<4 x i16> %A, <4 x i16> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_4h
 ; CHECK-SAME: (<4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i16> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i16> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
-; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       7:
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
+; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT:    store <8 x i16> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       14:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       15:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[A]], <4 x i16> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -584,25 +978,30 @@ define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_4h
 ; CHECK-SAME: (<4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i16> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <4 x i16> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <4 x i16> [[TMP3]] to i64
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
-; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x i16> [[TMP5]], <4 x i16> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <8 x i16> [[TMP9]], <8 x i16> [[TMP10]], <12 x i32> <i32 0, i32 1, i32 4, i32 2, i32 3, i32 6, i32 4, i32 5, i32 8, i32 6, i32 7, i32 10>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <12 x i16> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       9:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[A]], <4 x i16> [[B]], <4 x i16> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -614,29 +1013,32 @@ define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr
 ; CHECK-LABEL: define void @st4_4h
 ; CHECK-SAME: (<4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], <4 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <4 x i16> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP6]], 0
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <4 x i16> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <4 x i16> [[TMP3]] to i64
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP8]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <4 x i16> [[TMP4]] to i64
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    [[_MSCMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR7:%.*]] = or i1 [[_MSOR5]], [[_MSCMP6]]
-; CHECK-NEXT:    br i1 [[_MSOR7]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
-; CHECK:       10:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i16> [[TMP5]], <4 x i16> [[TMP7]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <8 x i16> [[TMP11]], <8 x i16> [[TMP12]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
+; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    store <16 x i16> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
+; CHECK:       20:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       11:
+; CHECK:       21:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[A]], <4 x i16> [[B]], <4 x i16> [[C]], <4 x i16> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -654,21 +1056,26 @@ define void @st2_8h(<8 x i16> %A, <8 x i16> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_8h
 ; CHECK-SAME: (<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
-; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       7:
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
+; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT:    store <16 x i16> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       14:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       15:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[A]], <8 x i16> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -680,25 +1087,30 @@ define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_8h
 ; CHECK-SAME: (<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
-; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i16> [[TMP5]], <8 x i16> undef, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <16 x i16> [[TMP9]], <16 x i16> [[TMP10]], <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <24 x i16> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       9:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[A]], <8 x i16> [[B]], <8 x i16> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -710,29 +1122,32 @@ define void @st4_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr
 ; CHECK-LABEL: define void @st4_8h
 ; CHECK-SAME: (<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i16> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP6]], 0
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i128 [[TMP8]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <8 x i16> [[TMP4]] to i128
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i128 [[TMP9]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    [[_MSCMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR7:%.*]] = or i1 [[_MSOR5]], [[_MSCMP6]]
-; CHECK-NEXT:    br i1 [[_MSOR7]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
-; CHECK:       10:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <8 x i16> [[TMP5]], <8 x i16> [[TMP7]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <16 x i16> [[TMP11]], <16 x i16> [[TMP12]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
+; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    store <32 x i16> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
+; CHECK:       20:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       11:
+; CHECK:       21:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[A]], <8 x i16> [[B]], <8 x i16> [[C]], <8 x i16> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -750,21 +1165,26 @@ define void @st2_2s(<2 x i32> %A, <2 x i32> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_2s
 ; CHECK-SAME: (<2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i32> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i32> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
-; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       7:
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
+; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT:    store <4 x i32> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       14:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       15:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[A]], <2 x i32> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -776,25 +1196,30 @@ define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_2s
 ; CHECK-SAME: (<2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i32> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <2 x i32> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <2 x i32> [[TMP3]] to i64
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
-; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i32> [[TMP5]], <2 x i32> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i32> [[TMP9]], <4 x i32> [[TMP10]], <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <6 x i32> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       9:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[A]], <2 x i32> [[B]], <2 x i32> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -806,29 +1231,32 @@ define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr
 ; CHECK-LABEL: define void @st4_2s
 ; CHECK-SAME: (<2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], <2 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <2 x i32> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP6]], 0
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <2 x i32> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <2 x i32> [[TMP3]] to i64
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP8]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <2 x i32> [[TMP4]] to i64
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    [[_MSCMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR7:%.*]] = or i1 [[_MSOR5]], [[_MSCMP6]]
-; CHECK-NEXT:    br i1 [[_MSOR7]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
-; CHECK:       10:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <2 x i32> [[TMP5]], <2 x i32> [[TMP7]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x i32> [[TMP11]], <4 x i32> [[TMP12]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
+; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    store <8 x i32> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
+; CHECK:       20:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       11:
+; CHECK:       21:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[A]], <2 x i32> [[B]], <2 x i32> [[C]], <2 x i32> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -844,21 +1272,26 @@ define void @st2_4s(<4 x i32> %A, <4 x i32> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_4s
 ; CHECK-SAME: (<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
-; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       7:
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
+; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT:    store <8 x i32> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       14:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       15:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[A]], <4 x i32> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -870,25 +1303,30 @@ define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_4s
 ; CHECK-SAME: (<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
-; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <8 x i32> [[TMP9]], <8 x i32> [[TMP10]], <12 x i32> <i32 0, i32 1, i32 4, i32 2, i32 3, i32 6, i32 4, i32 5, i32 8, i32 6, i32 7, i32 10>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <12 x i32> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       9:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[A]], <4 x i32> [[B]], <4 x i32> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -900,29 +1338,32 @@ define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr
 ; CHECK-LABEL: define void @st4_4s
 ; CHECK-SAME: (<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP6]], 0
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i128 [[TMP8]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <4 x i32> [[TMP4]] to i128
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i128 [[TMP9]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    [[_MSCMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR7:%.*]] = or i1 [[_MSOR5]], [[_MSCMP6]]
-; CHECK-NEXT:    br i1 [[_MSOR7]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
-; CHECK:       10:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> [[TMP7]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <8 x i32> [[TMP11]], <8 x i32> [[TMP12]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
+; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    store <16 x i32> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
+; CHECK:       20:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       11:
+; CHECK:       21:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[A]], <4 x i32> [[B]], <4 x i32> [[C]], <4 x i32> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -941,21 +1382,26 @@ define void @st2_1d(<1 x i64> %A, <1 x i64> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_1d
 ; CHECK-SAME: (<1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <1 x i64> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <1 x i64> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
-; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       7:
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP3]], <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
+; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT:    store <2 x i64> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       14:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       15:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[A]], <1 x i64> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -967,25 +1413,30 @@ define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_1d
 ; CHECK-SAME: (<1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <1 x i64> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <1 x i64> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <1 x i64> [[TMP3]] to i64
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
-; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP3]], <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <1 x i64> [[TMP5]], <1 x i64> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <2 x i64> [[TMP9]], <2 x i64> [[TMP10]], <3 x i32> <i32 0, i32 1, i32 1>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <3 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       9:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[A]], <1 x i64> [[B]], <1 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -997,29 +1448,32 @@ define void @st4_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr
 ; CHECK-LABEL: define void @st4_1d
 ; CHECK-SAME: (<1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], <1 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <1 x i64> [[TMP1]] to i64
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP6]], 0
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <1 x i64> [[TMP2]] to i64
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <1 x i64> [[TMP3]] to i64
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP8]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <1 x i64> [[TMP4]] to i64
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    [[_MSCMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR7:%.*]] = or i1 [[_MSOR5]], [[_MSCMP6]]
-; CHECK-NEXT:    br i1 [[_MSOR7]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
-; CHECK:       10:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP3]], <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <1 x i64> [[TMP5]], <1 x i64> [[TMP7]], <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <2 x i64> [[TMP11]], <2 x i64> [[TMP12]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
+; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    store <4 x i64> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
+; CHECK:       20:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       11:
+; CHECK:       21:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[A]], <1 x i64> [[B]], <1 x i64> [[C]], <1 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1037,21 +1491,26 @@ define void @st2_2d(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_2d
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
-; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
-; CHECK-NEXT:    unreachable
-; CHECK:       7:
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
+; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
+; CHECK-NEXT:    store <4 x i64> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       14:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       15:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1063,9 +1522,24 @@ define void @st2_2d_undefA(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize
 ; CHECK-LABEL: define void @st2_2d_undefA
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[TMP7]], 35184372088832
+; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1077,9 +1551,24 @@ define void @st2_2d_undefB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize
 ; CHECK-LABEL: define void @st2_2d_undefB
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[TMP7]], 35184372088832
+; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1091,8 +1580,21 @@ define void @st2_2d_undefAB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitiz
 ; CHECK-LABEL: define void @st2_2d_undefAB
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
+; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    store <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       9:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       10:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1104,25 +1606,30 @@ define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_2d
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i64 [[TMP4]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
-; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i64> [[TMP9]], <4 x i64> [[TMP10]], <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <6 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       9:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1134,10 +1641,28 @@ define void @st3_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 ; CHECK-LABEL: define void @st3_2d_undefA
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> [[TMP8]], <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <6 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       17:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1149,10 +1674,28 @@ define void @st3_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 ; CHECK-LABEL: define void @st3_2d_undefB
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> [[TMP8]], <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <6 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       17:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1164,15 +1707,27 @@ define void @st3_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 ; CHECK-LABEL: define void @st3_2d_undefC
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> <i64 -1, i64 undef, i64 -1, i64 undef>, <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 35184372088832
+; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    store <6 x i64> [[TMP8]], ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       15:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       16:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1184,9 +1739,25 @@ define void @st3_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 ; CHECK-LABEL: define void @st3_2d_undefAB
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> [[TMP5]], <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <6 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1198,9 +1769,25 @@ define void @st3_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 ; CHECK-LABEL: define void @st3_2d_undefAC
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> <i64 -1, i64 undef, i64 -1, i64 undef>, <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <6 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1212,9 +1799,25 @@ define void @st3_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 ; CHECK-LABEL: define void @st3_2d_undefBC
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> <i64 -1, i64 undef, i64 -1, i64 undef>, <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <6 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1226,8 +1829,21 @@ define void @st3_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) n
 ; CHECK-LABEL: define void @st3_2d_undefABC
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
+; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    store <6 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       9:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       10:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1239,29 +1855,32 @@ define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr
 ; CHECK-LABEL: define void @st4_2d
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP6]], 0
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i128 [[TMP8]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <2 x i64> [[TMP4]] to i128
-; CHECK-NEXT:    [[_MSCMP4:%.*]] = icmp ne i128 [[TMP9]], 0
-; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
-; CHECK-NEXT:    [[_MSCMP6:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR7:%.*]] = or i1 [[_MSOR5]], [[_MSCMP6]]
-; CHECK-NEXT:    br i1 [[_MSOR7]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
-; CHECK:       10:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> [[TMP7]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x i64> [[TMP11]], <4 x i64> [[TMP12]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
+; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
+; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
+; CHECK:       20:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       11:
+; CHECK:       21:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1277,11 +1896,30 @@ define void @st4_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-LABEL: define void @st4_2d_undefA
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> [[TMP5]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i64> [[TMP9]], <4 x i64> [[TMP10]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1293,11 +1931,30 @@ define void @st4_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-LABEL: define void @st4_2d_undefB
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> [[TMP5]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i64> [[TMP9]], <4 x i64> [[TMP10]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1309,16 +1966,30 @@ define void @st4_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-LABEL: define void @st4_2d_undefC
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP5]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i64> [[TMP9]], <4 x i64> [[TMP10]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1330,19 +2001,30 @@ define void @st4_2d_undefD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-LABEL: define void @st4_2d_undefD
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i128 [[TMP7]], 0
-; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i64> [[TMP9]], <4 x i64> [[TMP10]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
+; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1354,10 +2036,27 @@ define void @st4_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefAB
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> [[TMP7]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 35184372088832
+; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP8]], ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       15:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       16:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1369,10 +2068,28 @@ define void @st4_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefAC
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> [[TMP8]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       17:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1384,10 +2101,28 @@ define void @st4_2d_undefAD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefAD
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> [[TMP8]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       17:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1399,10 +2134,28 @@ define void @st4_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefBC
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> [[TMP8]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       17:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1414,10 +2167,28 @@ define void @st4_2d_undefBD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefBD
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> [[TMP8]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
+; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
+; CHECK:       16:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       17:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1429,15 +2200,27 @@ define void @st4_2d_undefCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefCD
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP4]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP5]], 0
-; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 35184372088832
+; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP8]], ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       15:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       16:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1449,9 +2232,25 @@ define void @st4_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefABC
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> [[TMP5]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1463,9 +2262,25 @@ define void @st4_2d_undefABD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefABD
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> [[TMP5]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1477,9 +2292,25 @@ define void @st4_2d_undefACD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefACD
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1491,9 +2322,25 @@ define void @st4_2d_undefBCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefBCD
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
+; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
+; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
+; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       14:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1505,8 +2352,21 @@ define void @st4_2d_undefABCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64
 ; CHECK-LABEL: define void @st4_2d_undefABCD
 ; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
+; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    store <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       9:
+; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       10:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;

>From 2fedc53f85a18cc5693a217bc0a5d64efe2dd83b Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Tue, 16 Jul 2024 21:01:19 +0000
Subject: [PATCH 03/14] Update instrumentation and test

---
 .../Instrumentation/MemorySanitizer.cpp       |   24 +-
 .../MemorySanitizer/AArch64/neon_vst.ll       | 1174 ++++++++++++-----
 2 files changed, 823 insertions(+), 375 deletions(-)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index c83e24323413e..6c7a593bdcfe2 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2505,13 +2505,22 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   using OriginCombiner = Combiner<false>;
 
-  /// Propagate origin for arbitrary operation.
-  void setOriginForNaryOp(Instruction &I) {
+  /// Propagate origin for an arbitrary operation by combining the origins of
+  /// its operands. If \p skipLastOperand is set, the last operand does not
+  /// contribute to the combined origin (e.g., a pointer operand whose shadow
+  /// is checked separately, as in the NEON st{2,3,4} handler).
+  void setOriginForNaryOp(Instruction &I, bool skipLastOperand = false) {
     if (!MS.TrackOrigins)
       return;
     IRBuilder<> IRB(&I);
     OriginCombiner OC(this, IRB);
-    for (Use &Op : I.operands())
-      OC.Add(Op.get());
+    // Hoist the operand count; do not subtract inside the loop condition so
+    // the skip adjustment happens exactly once and under the assert's guard.
+    unsigned NumOperands = I.getNumOperands();
+    if (skipLastOperand) {
+      assert(NumOperands > 0 &&
+             "Skip last operand requested on an instruction with no operands");
+      --NumOperands;
+    }
+    for (unsigned Idx = 0; Idx < NumOperands; ++Idx)
+      OC.Add(I.getOperand(Idx));
     OC.Done(&I);
   }
 
@@ -3991,18 +3998,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
         Addr, IRB, interleavedShadow->getType(), Align(1), /*isStore*/ true);
     IRB.CreateAlignedStore(interleavedShadow, ShadowPtr, Align(1));
-//    setShadow (&I, interleavedShadow);
 
     if (MS.TrackOrigins) {
-//      setOrigin(&I, getCleanOrigin());
-
-        errs() << "Inserting origin information ...\n";
-        Value *interleavedOrigin = interleaveShadowOrOrigin (IRB, I, false);
-
-        errs() << "Adding store for origin ...\n";
-        IRB.CreateAlignedStore(interleavedOrigin, OriginPtr, Align(1));
-
-      //      setOriginForNaryIntrinsic(I, true);
+      // Conservatively combine the origins of the data operands; the last
+      // operand (the pointer) is excluded because its shadow is checked
+      // separately below.
+      setOriginForNaryOp(I, true);
     }
 
     if (ClCheckAccessAddress) {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
index a770bda3519da..0f54e35ac8841 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
@@ -1,7 +1,7 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool build/bin/opt --version 2
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool opt --version 4
 ; Test memory sanitizer instrumentation for Arm NEON VST instructions.
 ;
-; RUN: build/bin/opt < %s -passes=msan -msan-track-origins=2 -S | FileCheck %s
+; RUN: opt < %s -passes=msan -msan-track-origins=2 -S | FileCheck %s
 ;
 ; Forked from llvm/test/CodeGen/AArch64/arm64-st1.ll
 
@@ -11,8 +11,8 @@ target triple = "aarch64--linux-android9001"
 ; -----------------------------------------------------------------------------------------------------------------------------------------------
 
 define void @st2_8b(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_8b
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-LABEL: define void @st2_8b(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -28,12 +28,17 @@ define void @st2_8b(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_memory {
 ; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
 ; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <16 x i8> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP14]], 0
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0:![0-9]+]]
-; CHECK:       14:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK:       19:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4:[0-9]+]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       15:
+; CHECK:       20:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -42,8 +47,8 @@ define void @st2_8b(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_memory {
 }
 
 define void @st2_8b_undefA(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_8b_undefA
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_8b_undefA(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -57,12 +62,17 @@ define void @st2_8b_undefA(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m
 ; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
 ; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
 ; CHECK-NEXT:    store <16 x i8> [[TMP5]], ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i64 [[TMP12]], 0
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP14]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
-; CHECK:       12:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
+; CHECK:       17:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       13:
+; CHECK:       18:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -71,8 +81,8 @@ define void @st2_8b_undefA(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m
 }
 
 define void @st2_8b_undefB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_8b_undefB
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_8b_undefB(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -86,12 +96,14 @@ define void @st2_8b_undefB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m
 ; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
 ; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
 ; CHECK-NEXT:    store <16 x i8> [[TMP5]], ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP12]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
-; CHECK:       12:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       14:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       13:
+; CHECK:       15:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -100,8 +112,8 @@ define void @st2_8b_undefB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m
 }
 
 define void @st2_8b_undefAB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_8b_undefAB
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_8b_undefAB(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
@@ -112,12 +124,14 @@ define void @st2_8b_undefAB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
-; CHECK:       9:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       11:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       10:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -126,8 +140,8 @@ define void @st2_8b_undefAB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_
 }
 
 define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_8b
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_8b(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -147,12 +161,20 @@ define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sani
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -161,8 +183,8 @@ define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sani
 }
 
 define void @st3_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_8b_undefA
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_8b_undefA(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -180,12 +202,20 @@ define void @st3_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 ; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
 ; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = icmp ne i64 [[TMP19]], 0
+; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP4]], i32 [[TMP18]]
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP21]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       16:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP24:%.*]], label [[TMP25:%.*]], !prof [[PROF0]]
+; CHECK:       24:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       17:
+; CHECK:       25:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -194,8 +224,8 @@ define void @st3_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 }
 
 define void @st3_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_8b_undefB
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_8b_undefB(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -213,12 +243,17 @@ define void @st3_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 ; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
 ; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       16:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
+; CHECK:       21:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       17:
+; CHECK:       22:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -227,8 +262,8 @@ define void @st3_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 }
 
 define void @st3_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_8b_undefC
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_8b_undefC(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -245,12 +280,17 @@ define void @st3_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 ; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP8]], ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP15]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP18:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP6]], i32 [[TMP17]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
-; CHECK:       15:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
+; CHECK:       20:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       16:
+; CHECK:       21:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -259,8 +299,8 @@ define void @st3_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 }
 
 define void @st3_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_8b_undefAB
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_8b_undefAB(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
@@ -275,12 +315,17 @@ define void @st3_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -289,8 +334,8 @@ define void @st3_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 }
 
 define void @st3_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_8b_undefAC
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_8b_undefAC(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
@@ -305,12 +350,17 @@ define void @st3_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -319,8 +369,8 @@ define void @st3_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 }
 
 define void @st3_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_8b_undefBC
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_8b_undefBC(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
@@ -335,12 +385,14 @@ define void @st3_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       15:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       16:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -349,8 +401,8 @@ define void @st3_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 }
 
 define void @st3_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_8b_undefABC
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_8b_undefABC(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
@@ -361,12 +413,14 @@ define void @st3_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) noun
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <24 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
-; CHECK:       9:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       11:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       10:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -375,8 +429,8 @@ define void @st3_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) noun
 }
 
 define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -398,12 +452,23 @@ define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P)
 ; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
 ; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP20]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP23]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
+; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <8 x i8> [[TMP7]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP26]], 0
+; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
-; CHECK:       20:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
+; CHECK:       31:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       21:
+; CHECK:       32:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -412,8 +477,8 @@ define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P)
 }
 
 define void @st4_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefA
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefA(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -433,12 +498,23 @@ define void @st4_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP4]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP25:%.*]] = icmp ne i64 [[TMP24]], 0
+; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP25]], i32 [[TMP6]], i32 [[TMP23]]
+; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP26]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP29:%.*]], label [[TMP30:%.*]], !prof [[PROF0]]
+; CHECK:       29:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       30:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -447,8 +523,8 @@ define void @st4_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 }
 
 define void @st4_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefB
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefB(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -468,12 +544,20 @@ define void @st4_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -482,8 +566,8 @@ define void @st4_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 }
 
 define void @st4_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefC
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefC(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -503,12 +587,20 @@ define void @st4_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -517,8 +609,8 @@ define void @st4_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 }
 
 define void @st4_8b_undefD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefD
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefD(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -538,12 +630,20 @@ define void @st4_8b_undefD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> [[C]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -552,8 +652,8 @@ define void @st4_8b_undefD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 }
 
 define void @st4_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefAB
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefAB(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
@@ -570,12 +670,20 @@ define void @st4_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP8]], ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP15]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP17]]
+; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP6]], i32 [[TMP20]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
-; CHECK:       15:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP23:%.*]], label [[TMP24:%.*]], !prof [[PROF0]]
+; CHECK:       23:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       16:
+; CHECK:       24:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -584,8 +692,8 @@ define void @st4_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 }
 
 define void @st4_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefAC
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefAC(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
@@ -603,12 +711,20 @@ define void @st4_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
 ; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = icmp ne i64 [[TMP19]], 0
+; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP4]], i32 [[TMP18]]
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP21]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       16:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP24:%.*]], label [[TMP25:%.*]], !prof [[PROF0]]
+; CHECK:       24:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       17:
+; CHECK:       25:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -617,8 +733,8 @@ define void @st4_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 }
 
 define void @st4_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefBC
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefBC(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
@@ -636,12 +752,17 @@ define void @st4_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
 ; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       16:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
+; CHECK:       21:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       17:
+; CHECK:       22:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -650,8 +771,8 @@ define void @st4_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 }
 
 define void @st4_8b_undefBD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefBD
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefBD(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -669,12 +790,17 @@ define void @st4_8b_undefBD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
 ; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       16:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
+; CHECK:       21:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       17:
+; CHECK:       22:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> [[C]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -683,8 +809,8 @@ define void @st4_8b_undefBD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 }
 
 define void @st4_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefABC
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefABC(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -699,12 +825,17 @@ define void @st4_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -713,8 +844,8 @@ define void @st4_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 }
 
 define void @st4_8b_undefABD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefABD
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefABD(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -729,12 +860,17 @@ define void @st4_8b_undefABD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> [[C]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -743,8 +879,8 @@ define void @st4_8b_undefABD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 }
 
 define void @st4_8b_undefACD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefACD
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefACD(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -759,12 +895,17 @@ define void @st4_8b_undefACD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -773,8 +914,8 @@ define void @st4_8b_undefACD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 }
 
 define void @st4_8b_undefBCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefBCD
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefBCD(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -789,12 +930,14 @@ define void @st4_8b_undefBCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       15:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       16:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -803,8 +946,8 @@ define void @st4_8b_undefBCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 }
 
 define void @st4_8b_undefABCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8b_undefABCD
-; CHECK-SAME: (<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8b_undefABCD(
+; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
@@ -815,12 +958,14 @@ define void @st4_8b_undefABCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
-; CHECK:       9:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       11:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       10:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -835,8 +980,8 @@ declare void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, ptr) n
 declare void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, ptr) nounwind sanitize_memory readonly
 
 define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_16b
-; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_16b(
+; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -852,12 +997,17 @@ define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind sanitize_memor
 ; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
 ; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i128 [[TMP14]], 0
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
-; CHECK:       14:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
+; CHECK:       19:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       15:
+; CHECK:       20:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[A]], <16 x i8> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -866,8 +1016,8 @@ define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind sanitize_memor
 }
 
 define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_16b
-; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_16b(
+; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -887,12 +1037,20 @@ define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <48 x i8> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[A]], <16 x i8> [[B]], <16 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -901,8 +1059,8 @@ define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind
 }
 
 define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_16b
-; CHECK-SAME: (<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_16b(
+; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -924,12 +1082,23 @@ define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr
 ; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
 ; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <64 x i8> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i128 [[TMP20]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <16 x i8> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i128 [[TMP23]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
+; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <16 x i8> [[TMP7]] to i128
+; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i128 [[TMP26]], 0
+; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
-; CHECK:       20:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
+; CHECK:       31:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       21:
+; CHECK:       32:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[A]], <16 x i8> [[B]], <16 x i8> [[C]], <16 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -944,8 +1113,8 @@ declare void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, pt
 declare void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, ptr) nounwind sanitize_memory readonly
 
 define void @st2_4h(<4 x i16> %A, <4 x i16> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_4h
-; CHECK-SAME: (<4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_4h(
+; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -961,12 +1130,17 @@ define void @st2_4h(<4 x i16> %A, <4 x i16> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
 ; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <8 x i16> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <4 x i16> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP14]], 0
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
-; CHECK:       14:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
+; CHECK:       19:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       15:
+; CHECK:       20:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[A]], <4 x i16> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -975,8 +1149,8 @@ define void @st2_4h(<4 x i16> %A, <4 x i16> %B, ptr %P) nounwind sanitize_memory
 }
 
 define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_4h
-; CHECK-SAME: (<4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_4h(
+; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -996,12 +1170,20 @@ define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <12 x i16> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x i16> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <4 x i16> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[A]], <4 x i16> [[B]], <4 x i16> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1010,8 +1192,8 @@ define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %P) nounwind s
 }
 
 define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_4h
-; CHECK-SAME: (<4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], <4 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_4h(
+; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], <4 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -1033,12 +1215,23 @@ define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr
 ; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
 ; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <16 x i16> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x i16> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP20]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <4 x i16> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP23]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
+; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <4 x i16> [[TMP7]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP26]], 0
+; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
-; CHECK:       20:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
+; CHECK:       31:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       21:
+; CHECK:       32:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[A]], <4 x i16> [[B]], <4 x i16> [[C]], <4 x i16> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1053,8 +1246,8 @@ declare void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, pt
 declare void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, ptr) nounwind sanitize_memory readonly
 
 define void @st2_8h(<8 x i16> %A, <8 x i16> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_8h
-; CHECK-SAME: (<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_8h(
+; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -1070,12 +1263,17 @@ define void @st2_8h(<8 x i16> %A, <8 x i16> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
 ; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <16 x i16> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i128 [[TMP14]], 0
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
-; CHECK:       14:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
+; CHECK:       19:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       15:
+; CHECK:       20:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[A]], <8 x i16> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1084,8 +1282,8 @@ define void @st2_8h(<8 x i16> %A, <8 x i16> %B, ptr %P) nounwind sanitize_memory
 }
 
 define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_8h
-; CHECK-SAME: (<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_8h(
+; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -1105,12 +1303,20 @@ define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <24 x i16> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i16> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[A]], <8 x i16> [[B]], <8 x i16> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1119,8 +1325,8 @@ define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %P) nounwind s
 }
 
 define void @st4_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_8h
-; CHECK-SAME: (<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_8h(
+; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -1142,12 +1348,23 @@ define void @st4_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr
 ; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
 ; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <32 x i16> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i128 [[TMP20]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <8 x i16> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i128 [[TMP23]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
+; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <8 x i16> [[TMP7]] to i128
+; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i128 [[TMP26]], 0
+; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
-; CHECK:       20:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
+; CHECK:       31:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       21:
+; CHECK:       32:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[A]], <8 x i16> [[B]], <8 x i16> [[C]], <8 x i16> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1162,8 +1379,8 @@ declare void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, pt
 declare void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, ptr) nounwind sanitize_memory readonly
 
 define void @st2_2s(<2 x i32> %A, <2 x i32> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_2s
-; CHECK-SAME: (<2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_2s(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -1179,12 +1396,17 @@ define void @st2_2s(<2 x i32> %A, <2 x i32> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
 ; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <4 x i32> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <2 x i32> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP14]], 0
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
-; CHECK:       14:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
+; CHECK:       19:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       15:
+; CHECK:       20:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[A]], <2 x i32> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1193,8 +1415,8 @@ define void @st2_2s(<2 x i32> %A, <2 x i32> %B, ptr %P) nounwind sanitize_memory
 }
 
 define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_2s
-; CHECK-SAME: (<2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_2s(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -1214,12 +1436,20 @@ define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <6 x i32> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i32> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i32> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[A]], <2 x i32> [[B]], <2 x i32> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1228,8 +1458,8 @@ define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %P) nounwind s
 }
 
 define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2s
-; CHECK-SAME: (<2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], <2 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2s(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], <2 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -1251,12 +1481,23 @@ define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr
 ; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
 ; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <8 x i32> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <2 x i32> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP20]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <2 x i32> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP23]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
+; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <2 x i32> [[TMP7]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP26]], 0
+; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
-; CHECK:       20:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
+; CHECK:       31:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       21:
+; CHECK:       32:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[A]], <2 x i32> [[B]], <2 x i32> [[C]], <2 x i32> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1269,8 +1510,8 @@ declare void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, pt
 declare void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, ptr) nounwind sanitize_memory readonly
 
 define void @st2_4s(<4 x i32> %A, <4 x i32> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_4s
-; CHECK-SAME: (<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_4s(
+; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -1286,12 +1527,17 @@ define void @st2_4s(<4 x i32> %A, <4 x i32> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
 ; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <8 x i32> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i128 [[TMP14]], 0
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
-; CHECK:       14:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
+; CHECK:       19:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       15:
+; CHECK:       20:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[A]], <4 x i32> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1300,8 +1546,8 @@ define void @st2_4s(<4 x i32> %A, <4 x i32> %B, ptr %P) nounwind sanitize_memory
 }
 
 define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_4s
-; CHECK-SAME: (<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_4s(
+; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -1321,12 +1567,20 @@ define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <12 x i32> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <4 x i32> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[A]], <4 x i32> [[B]], <4 x i32> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1335,8 +1589,8 @@ define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %P) nounwind s
 }
 
 define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_4s
-; CHECK-SAME: (<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_4s(
+; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -1358,12 +1612,23 @@ define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr
 ; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
 ; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <16 x i32> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i128 [[TMP20]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <4 x i32> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i128 [[TMP23]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
+; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <4 x i32> [[TMP7]] to i128
+; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i128 [[TMP26]], 0
+; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
-; CHECK:       20:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
+; CHECK:       31:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       21:
+; CHECK:       32:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[A]], <4 x i32> [[B]], <4 x i32> [[C]], <4 x i32> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1379,8 +1644,8 @@ declare void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, <4
 
 ; If there's only one element, st2/3/4 don't make much sense, stick to st1.
 define void @st2_1d(<1 x i64> %A, <1 x i64> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_1d
-; CHECK-SAME: (<1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_1d(
+; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -1396,12 +1661,17 @@ define void @st2_1d(<1 x i64> %A, <1 x i64> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
 ; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <2 x i64> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <1 x i64> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP14]], 0
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
-; CHECK:       14:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
+; CHECK:       19:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       15:
+; CHECK:       20:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[A]], <1 x i64> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1410,8 +1680,8 @@ define void @st2_1d(<1 x i64> %A, <1 x i64> %B, ptr %P) nounwind sanitize_memory
 }
 
 define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_1d
-; CHECK-SAME: (<1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_1d(
+; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -1431,12 +1701,20 @@ define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <3 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <1 x i64> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <1 x i64> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[A]], <1 x i64> [[B]], <1 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1445,8 +1723,8 @@ define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %P) nounwind s
 }
 
 define void @st4_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_1d
-; CHECK-SAME: (<1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], <1 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_1d(
+; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], <1 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
@@ -1468,12 +1746,23 @@ define void @st4_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr
 ; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
 ; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <4 x i64> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <1 x i64> [[TMP3]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP20]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <1 x i64> [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP23]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
+; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <1 x i64> [[TMP7]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP26]], 0
+; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
-; CHECK:       20:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
+; CHECK:       31:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       21:
+; CHECK:       32:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[A]], <1 x i64> [[B]], <1 x i64> [[C]], <1 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1488,8 +1777,8 @@ declare void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, pt
 declare void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, ptr) nounwind sanitize_memory readonly
 
 define void @st2_2d(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_2d
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_2d(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -1505,12 +1794,17 @@ define void @st2_2d(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
 ; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <4 x i64> [[TMP7]], ptr [[TMP10]], align 1
+; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i128 [[TMP14]], 0
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
-; CHECK:       14:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
+; CHECK:       19:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       15:
+; CHECK:       20:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1519,8 +1813,8 @@ define void @st2_2d(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize_memory
 }
 
 define void @st2_2d_undefA(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_2d_undefA
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_2d_undefA(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -1534,12 +1828,17 @@ define void @st2_2d_undefA(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize
 ; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
 ; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
 ; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i128 [[TMP12]], 0
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP14]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
-; CHECK:       12:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
+; CHECK:       17:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       13:
+; CHECK:       18:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1548,8 +1847,8 @@ define void @st2_2d_undefA(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize
 }
 
 define void @st2_2d_undefB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_2d_undefB
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_2d_undefB(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -1563,12 +1862,14 @@ define void @st2_2d_undefB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize
 ; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
 ; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
 ; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP12]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
-; CHECK:       12:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       14:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       13:
+; CHECK:       15:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1577,8 +1878,8 @@ define void @st2_2d_undefB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize
 }
 
 define void @st2_2d_undefAB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st2_2d_undefAB
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st2_2d_undefAB(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
@@ -1589,12 +1890,14 @@ define void @st2_2d_undefAB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitiz
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
-; CHECK:       9:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       11:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       10:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1603,8 +1906,8 @@ define void @st2_2d_undefAB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitiz
 }
 
 define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_2d
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_2d(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -1624,12 +1927,20 @@ define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1638,8 +1949,8 @@ define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind s
 }
 
 define void @st3_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_2d_undefA
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_2d_undefA(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -1657,12 +1968,20 @@ define void @st3_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 ; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
 ; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP20:%.*]] = icmp ne i128 [[TMP19]], 0
+; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP4]], i32 [[TMP18]]
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP21]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       16:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP24:%.*]], label [[TMP25:%.*]], !prof [[PROF0]]
+; CHECK:       24:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       17:
+; CHECK:       25:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1671,8 +1990,8 @@ define void @st3_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 }
 
 define void @st3_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_2d_undefB
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_2d_undefB(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -1690,12 +2009,17 @@ define void @st3_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 ; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
 ; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       16:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
+; CHECK:       21:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       17:
+; CHECK:       22:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1704,8 +2028,8 @@ define void @st3_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 }
 
 define void @st3_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_2d_undefC
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_2d_undefC(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -1722,12 +2046,17 @@ define void @st3_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 ; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP8]], ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i128 [[TMP15]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP18:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP6]], i32 [[TMP17]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
-; CHECK:       15:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
+; CHECK:       20:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       16:
+; CHECK:       21:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1736,8 +2065,8 @@ define void @st3_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 }
 
 define void @st3_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_2d_undefAB
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_2d_undefAB(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
@@ -1752,12 +2081,17 @@ define void @st3_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1766,8 +2100,8 @@ define void @st3_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 }
 
 define void @st3_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_2d_undefAC
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_2d_undefAC(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
@@ -1782,12 +2116,17 @@ define void @st3_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1796,8 +2135,8 @@ define void @st3_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 }
 
 define void @st3_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_2d_undefBC
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_2d_undefBC(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
@@ -1812,12 +2151,14 @@ define void @st3_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       15:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       16:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1826,8 +2167,8 @@ define void @st3_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 }
 
 define void @st3_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st3_2d_undefABC
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st3_2d_undefABC(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
@@ -1838,12 +2179,14 @@ define void @st3_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) n
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <6 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
-; CHECK:       9:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       11:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       10:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1852,8 +2195,8 @@ define void @st3_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) n
 }
 
 define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -1875,12 +2218,23 @@ define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr
 ; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
 ; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP13]], ptr [[TMP16]], align 1
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i128 [[TMP20]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i128 [[TMP23]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
+; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <2 x i64> [[TMP7]] to i128
+; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i128 [[TMP26]], 0
+; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
-; CHECK:       20:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
+; CHECK:       31:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       21:
+; CHECK:       32:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1893,8 +2247,8 @@ declare void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, pt
 declare void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, ptr) nounwind sanitize_memory readonly
 
 define void @st4_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefA
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefA(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -1914,12 +2268,23 @@ define void @st4_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP4]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP25:%.*]] = icmp ne i128 [[TMP24]], 0
+; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP25]], i32 [[TMP6]], i32 [[TMP23]]
+; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP26]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP29:%.*]], label [[TMP30:%.*]], !prof [[PROF0]]
+; CHECK:       29:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       30:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1928,8 +2293,8 @@ define void @st4_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 }
 
 define void @st4_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefB
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefB(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -1949,12 +2314,20 @@ define void @st4_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1963,8 +2336,8 @@ define void @st4_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 }
 
 define void @st4_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefC
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefC(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -1984,12 +2357,20 @@ define void @st4_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1998,8 +2379,8 @@ define void @st4_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 }
 
 define void @st4_2d_undefD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefD
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefD(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -2019,12 +2400,20 @@ define void @st4_2d_undefD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
 ; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
+; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
+; CHECK:       26:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       27:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2033,8 +2422,8 @@ define void @st4_2d_undefD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 }
 
 define void @st4_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefAB
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefAB(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
@@ -2051,12 +2440,20 @@ define void @st4_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP8]], ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i128 [[TMP15]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP17]]
+; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP6]], i32 [[TMP20]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
-; CHECK:       15:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP23:%.*]], label [[TMP24:%.*]], !prof [[PROF0]]
+; CHECK:       23:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       16:
+; CHECK:       24:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2065,8 +2462,8 @@ define void @st4_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 }
 
 define void @st4_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefAC
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefAC(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
@@ -2084,12 +2481,20 @@ define void @st4_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
 ; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP20:%.*]] = icmp ne i128 [[TMP19]], 0
+; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP4]], i32 [[TMP18]]
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP21]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       16:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP24:%.*]], label [[TMP25:%.*]], !prof [[PROF0]]
+; CHECK:       24:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       17:
+; CHECK:       25:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2098,8 +2503,8 @@ define void @st4_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 }
 
 define void @st4_2d_undefAD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefAD
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefAD(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -2117,12 +2522,20 @@ define void @st4_2d_undefAD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
 ; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP20:%.*]] = icmp ne i128 [[TMP19]], 0
+; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP4]], i32 [[TMP18]]
+; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP21]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       16:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP24:%.*]], label [[TMP25:%.*]], !prof [[PROF0]]
+; CHECK:       24:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       17:
+; CHECK:       25:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2131,8 +2544,8 @@ define void @st4_2d_undefAD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 }
 
 define void @st4_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefBC
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefBC(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
@@ -2150,12 +2563,17 @@ define void @st4_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
 ; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       16:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
+; CHECK:       21:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       17:
+; CHECK:       22:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2164,8 +2582,8 @@ define void @st4_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 }
 
 define void @st4_2d_undefBD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefBD
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefBD(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
@@ -2183,12 +2601,17 @@ define void @st4_2d_undefBD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
 ; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP16:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       16:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
+; CHECK:       21:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       17:
+; CHECK:       22:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2197,8 +2620,8 @@ define void @st4_2d_undefBD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 }
 
 define void @st4_2d_undefCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefCD
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefCD(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
@@ -2215,12 +2638,17 @@ define void @st4_2d_undefCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP8]], ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i128 [[TMP15]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP18:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP6]], i32 [[TMP17]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
-; CHECK:       15:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
+; CHECK:       20:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       16:
+; CHECK:       21:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2229,8 +2657,8 @@ define void @st4_2d_undefCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 }
 
 define void @st4_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefABC
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefABC(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
@@ -2245,12 +2673,17 @@ define void @st4_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2259,8 +2692,8 @@ define void @st4_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 }
 
 define void @st4_2d_undefABD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefABD
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefABD(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
@@ -2275,12 +2708,17 @@ define void @st4_2d_undefABD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2289,8 +2727,8 @@ define void @st4_2d_undefABD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 }
 
 define void @st4_2d_undefACD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefACD
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefACD(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
@@ -2305,12 +2743,17 @@ define void @st4_2d_undefACD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       18:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       19:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2319,8 +2762,8 @@ define void @st4_2d_undefACD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 }
 
 define void @st4_2d_undefBCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefBCD
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefBCD(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
@@ -2335,12 +2778,14 @@ define void @st4_2d_undefBCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; CHECK:       13:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       15:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       14:
+; CHECK:       16:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2349,8 +2794,8 @@ define void @st4_2d_undefBCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 }
 
 define void @st4_2d_undefABCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind sanitize_memory {
-; CHECK-LABEL: define void @st4_2d_undefABCD
-; CHECK-SAME: (<2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
+; CHECK-LABEL: define void @st4_2d_undefABCD(
+; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
@@ -2361,15 +2806,20 @@ define void @st4_2d_undefABCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, ptr [[TMP5]], align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
-; CHECK:       9:
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       11:
 ; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       10:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, ptr %P)
   ret void
 }
+;.
+; CHECK: [[PROF0]] = !{!"branch_weights", i32 1, i32 1048575}
+;.

>From c11d56276eb6569e7f3a9dfa356bd6b8847c71e3 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Tue, 16 Jul 2024 21:30:00 +0000
Subject: [PATCH 04/14] Simplify code and reformat

---
 .../Instrumentation/MemorySanitizer.cpp       | 68 +++++--------------
 1 file changed, 18 insertions(+), 50 deletions(-)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 6c7a593bdcfe2..b410423c18554 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2505,6 +2505,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   using OriginCombiner = Combiner<false>;
 
   /// Propagate origin for arbitrary operation.
+  /// skipLastOperand is useful for Arm NEON instructions, which have the
+  /// destination address as the last operand.
   void setOriginForNaryOp(Instruction &I, bool skipLastOperand = false) {
     if (!MS.TrackOrigins)
       return;
@@ -2512,12 +2514,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     OriginCombiner OC(this, IRB);
 
     if (skipLastOperand)
-      assert((I.getNumOperands() > 0)
-             && "Skip last operand requested on instruction with no operands");
+      assert((I.getNumOperands() > 0) &&
+             "Skip last operand requested on instruction with no operands");
 
     unsigned int i = 0;
     for (i = 0; i < I.getNumOperands() - (skipLastOperand ? 1 : 0); i++) {
-      OC.Add(I.getOperand (i));
+      OC.Add(I.getOperand(i));
     }
     OC.Done(&I);
   }
@@ -3910,15 +3912,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return IRB.CreateShuffleVector(left, right, ConstantVector::get(Idxs));
   }
 
-  Value *getShadowOrOrigin (Instruction* I, int i, bool shadowMode) {
-    if (shadowMode)
-      return getShadow (I, i);
-    else
-      return getOrigin (I, i);
-  }
-
-  Value *interleaveShadowOrOrigin(IRBuilder<> &IRB, IntrinsicInst &I,
-                                  bool shadowMode) {
+  Value *interleaveShadowOrOrigin(IRBuilder<> &IRB, IntrinsicInst &I) {
     // Call arguments only
     int numArgOperands = I.getNumOperands() - 1;
     assert(numArgOperands >= 1);
@@ -3931,55 +3925,31 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
     // Last operand is the destination
     assert(isa<PointerType>(I.getArgOperand(numArgOperands - 1)->getType()));
-    errs() << "Assertions ok\n";
-
-    for (unsigned int i = 0; i < I.getNumOperands(); i++) {
-      errs() << "Operand " << i << ": " << I.getOperand(i)->getName() << "\n";
-    }
 
     uint16_t Width =
         cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
-//    Width = Width / 4; // One origin value per 32-bits of app memory
-
-    uint16_t ElemSize = cast<FixedVectorType>(I.getArgOperand(0)->getType())
-                            ->getElementType()
-                            ->getPrimitiveSizeInBits();
-
-    dumpInst(I);
-    errs() << "Num operands: " << I.getNumOperands() << "\n";
-    errs() << "Num arg operands: " << numArgOperands << "\n";
-    errs() << "Num vectors: " << numVectors << "\n";
-    errs() << "Width: " << Width << "\n";
-    errs() << "Elem size: " << ElemSize << "\n";
 
     Value *interleaved = nullptr;
-    if (numVectors == 1) {
-      interleaved = getShadowOrOrigin(&I, 0, shadowMode);
-    } else if (numVectors == 2) {
+    if (numVectors == 2) {
       interleaved =
-          interleaveAB(IRB, getShadowOrOrigin(&I, 0, shadowMode),
-                            getShadowOrOrigin(&I, 1, shadowMode), Width);
+          interleaveAB(IRB, getShadow(&I, 0), getShadow(&I, 1), Width);
     } else if (numVectors == 3) {
-      Value *UndefV = UndefValue::get(getShadowOrOrigin(&I, 0, shadowMode)->getType());
-      Value *AB = interleaveAB(IRB, getShadowOrOrigin(&I, 0, shadowMode),
-                                    getShadowOrOrigin(&I, 1, shadowMode),
-                                    Width);
-      Value *Cx = interleaveAB(IRB, getShadowOrOrigin(&I, 2, shadowMode), UndefV, Width);
+      Value *UndefV = UndefValue::get(getShadow(&I, 0)->getType());
+      Value *AB = interleaveAB(IRB, getShadow(&I, 0), getShadow(&I, 1), Width);
+      Value *Cx = interleaveAB(IRB, getShadow(&I, 2), UndefV, Width);
       interleaved = interleaveABCx(IRB, AB, Cx, Width);
     } else if (numVectors == 4) {
-      Value *AB = interleaveAB(IRB, getShadowOrOrigin(&I, 0, shadowMode),
-                                    getShadowOrOrigin(&I, 1, shadowMode), Width);
-      Value *CD = interleaveAB(IRB, getShadowOrOrigin(&I, 2, shadowMode),
-                                    getShadowOrOrigin(&I, 3, shadowMode), Width);
+      Value *AB = interleaveAB(IRB, getShadow(&I, 0), getShadow(&I, 1), Width);
+      Value *CD = interleaveAB(IRB, getShadow(&I, 2), getShadow(&I, 3), Width);
       interleaved = interleaveAB(IRB, AB, CD, Width * 2);
     } else {
-      //          assert(! "Unexpected number of vectors");
+      assert((numVectors >= 2) && (numVectors <= 4));
     }
 
     return interleaved;
   }
 
-  /// Handle Arm NEON vector store intrinsics (vst{1,2,3,4}).
+  /// Handle Arm NEON vector store intrinsics (vst{2,3,4}).
   ///
   /// Arm NEON vector store intrinsics have the output address (pointer) as the
   /// last argument, with the initial arguments being the inputs. They return
@@ -3987,7 +3957,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   void handleNEONVectorStoreIntrinsic(IntrinsicInst &I) {
     IRBuilder<> IRB(&I);
 
-    Value *interleavedShadow = interleaveShadowOrOrigin(IRB, I, true);
+    Value *interleavedShadow = interleaveShadowOrOrigin(IRB, I);
 
     // Call arguments only
     int numArgOperands = I.getNumOperands() - 1;
@@ -4000,13 +3970,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     IRB.CreateAlignedStore(interleavedShadow, ShadowPtr, Align(1));
 
     if (MS.TrackOrigins) {
-        setOriginForNaryOp(I, true);
+      setOriginForNaryOp(I, true);
     }
 
-    if (ClCheckAccessAddress) {
-      errs() << "Inserting shadow check ...\n";
+    if (ClCheckAccessAddress)
       insertShadowCheck(Addr, &I);
-    }
   }
 
   void visitIntrinsicInst(IntrinsicInst &I) {

>From 0f9f606ec119d95f27baafc885b24fa66b52b1e7 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Tue, 16 Jul 2024 21:31:40 +0000
Subject: [PATCH 05/14] Remove unnecessary braces

---
 llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index b410423c18554..372ec86d21095 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2518,9 +2518,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
              "Skip last operand requested on instruction with no operands");
 
     unsigned int i = 0;
-    for (i = 0; i < I.getNumOperands() - (skipLastOperand ? 1 : 0); i++) {
+    for (i = 0; i < I.getNumOperands() - (skipLastOperand ? 1 : 0); i++)
       OC.Add(I.getOperand(i));
-    }
+
     OC.Done(&I);
   }
 
@@ -3969,9 +3969,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         Addr, IRB, interleavedShadow->getType(), Align(1), /*isStore*/ true);
     IRB.CreateAlignedStore(interleavedShadow, ShadowPtr, Align(1));
 
-    if (MS.TrackOrigins) {
+    if (MS.TrackOrigins)
       setOriginForNaryOp(I, true);
-    }
 
     if (ClCheckAccessAddress)
       insertShadowCheck(Addr, &I);

>From d4b1502e9c5e16fbdcbeec3def3cddf15130bf30 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 17 Jul 2024 16:26:31 +0000
Subject: [PATCH 06/14] Skip last two operands

---
 .../Instrumentation/MemorySanitizer.cpp       | 24 +++++++++----------
 .../MemorySanitizer/AArch64/neon_vst.ll       |  2 +-
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 372ec86d21095..dafc6c7808d92 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2505,21 +2505,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   using OriginCombiner = Combiner<false>;
 
   /// Propagate origin for arbitrary operation.
-  /// skipLastOperand is useful for Arm NEON instructions, which have the
-  /// destination address as the last operand.
-  void setOriginForNaryOp(Instruction &I, bool skipLastOperand = false) {
+  void setOriginForNaryOp(Instruction &I, unsigned int skipLastOperands = 0) {
     if (!MS.TrackOrigins)
       return;
     IRBuilder<> IRB(&I);
     OriginCombiner OC(this, IRB);
 
-    if (skipLastOperand)
-      assert((I.getNumOperands() > 0) &&
-             "Skip last operand requested on instruction with no operands");
-
-    unsigned int i = 0;
-    for (i = 0; i < I.getNumOperands() - (skipLastOperand ? 1 : 0); i++)
-      OC.Add(I.getOperand(i));
+    if (skipLastOperands > 0)
+      assert((I.getNumOperands() > skipLastOperands) &&
+             "Insufficient number of operands to skip!");
 
     OC.Done(&I);
   }
@@ -3969,8 +3963,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         Addr, IRB, interleavedShadow->getType(), Align(1), /*isStore*/ true);
     IRB.CreateAlignedStore(interleavedShadow, ShadowPtr, Align(1));
 
-    if (MS.TrackOrigins)
-      setOriginForNaryOp(I, true);
+    if (MS.TrackOrigins) {
+      // We don't use the last two operands to compute the origin, because:
+      // - the last operand is the callee
+      //   e.g., 'declare void @llvm.aarch64.neon.st2.v8i16.p0(...'
+      // - the second-last operand is the return value
+      //   e.g., '%arraydecay74 = getelementptr inbounds ...'
+      setOriginForNaryOp(I, 2);
+    }
 
     if (ClCheckAccessAddress)
       insertShadowCheck(Addr, &I);
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
index 0f54e35ac8841..3fd7ca05a5608 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --tool opt --version 4
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
 ; Test memory sanitizer instrumentation for Arm NEON VST instructions.
 ;
 ; RUN: opt < %s -passes=msan -msan-track-origins=2 -S | FileCheck %s

>From 7571757554666df19f527e3acd58d46a181472e8 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 17 Jul 2024 17:10:16 +0000
Subject: [PATCH 07/14] Origin tracking

---
 llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index dafc6c7808d92..4cf73c890326a 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2505,6 +2505,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   using OriginCombiner = Combiner<false>;
 
   /// Propagate origin for arbitrary operation.
+  ///
+  /// Optionally skips n trailing operands.
   void setOriginForNaryOp(Instruction &I, unsigned int skipLastOperands = 0) {
     if (!MS.TrackOrigins)
       return;
@@ -2515,6 +2517,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       assert((I.getNumOperands() > skipLastOperands) &&
              "Insufficient number of operands to skip!");
 
+    for (unsigned int i = 0; i < I.getNumOperands() - skipLastOperands; i++)
+      OC.Add(I.getOperand(i));
+
     OC.Done(&I);
   }
 

>From 10edff532c75334d0d93e2b66d00fed0150d8b90 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 17 Jul 2024 17:19:06 +0000
Subject: [PATCH 08/14] Add preconditions for interleave functions

---
 llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 4cf73c890326a..c17caa12c2514 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3876,9 +3876,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
   // Given two shadows AAAA..., BBBB..., return the interleaved value
   // ABABABAB ...
+  //
+  // Width == number of elements in A == number of elements in B
   Value *interleaveAB(IRBuilder<> &IRB, Value *left, Value *right, uint Width) {
     assert(isa<FixedVectorType>(left->getType()));
     assert(isa<FixedVectorType>(right->getType()));
+    assert(cast<FixedVectorType>(left->getType())->getNumElements() == Width);
+    assert(cast<FixedVectorType>(right->getType())->getNumElements() == Width);
 
     SmallVector<Constant *> Idxs;
 
@@ -3892,10 +3896,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
   // Given three shadows, which are already interleaved into two shadows
   // ABABABAB and CxCxCxCx (x is undef), return the interleaved value ABCABCABC.
+  //
+  // Note: Width == number of elements in A == B == C, so 'left' and 'right'
+  //       each contain 2 * Width elements (hence the asserts below).
   Value *interleaveABCx(IRBuilder<> &IRB, Value *left, Value *right,
                         uint Width) {
     assert(isa<FixedVectorType>(left->getType()));
     assert(isa<FixedVectorType>(right->getType()));
+    assert(cast<FixedVectorType>(left->getType())->getNumElements() == 2 * Width);
+    assert(cast<FixedVectorType>(right->getType())->getNumElements() == 2 * Width);
 
     SmallVector<Constant *> Idxs;
 

>From 2337a90bd7d2e2eea7f0538a639bdd19de37aaba Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 17 Jul 2024 17:22:17 +0000
Subject: [PATCH 09/14] Comment

---
 llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index c17caa12c2514..103e96f8510c6 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3920,7 +3920,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return IRB.CreateShuffleVector(left, right, ConstantVector::get(Idxs));
   }
 
-  Value *interleaveShadowOrOrigin(IRBuilder<> &IRB, IntrinsicInst &I) {
+  /// Calculates the shadow when interleaving 2, 3 or 4 vectors
+  /// (e.g., for Arm NEON vector store).
+  Value *interleaveShadow(IRBuilder<> &IRB, IntrinsicInst &I) {
     // Call arguments only
     int numArgOperands = I.getNumOperands() - 1;
     assert(numArgOperands >= 1);
@@ -3965,7 +3967,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   void handleNEONVectorStoreIntrinsic(IntrinsicInst &I) {
     IRBuilder<> IRB(&I);
 
-    Value *interleavedShadow = interleaveShadowOrOrigin(IRB, I);
+    Value *interleavedShadow = interleaveShadow(IRB, I);
 
     // Call arguments only
     int numArgOperands = I.getNumOperands() - 1;

>From 4289ebb0d757cbac356c6e47926c3365655b9844 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 17 Jul 2024 17:23:22 +0000
Subject: [PATCH 10/14] Wording

---
 llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 103e96f8510c6..8ec41edc2646f 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3920,7 +3920,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return IRB.CreateShuffleVector(left, right, ConstantVector::get(Idxs));
   }
 
-  /// Calculates the shadow when interleaving 2, 3 or 4 vectors
+  /// Calculates the shadow for interleaving 2, 3 or 4 vectors
   /// (e.g., for Arm NEON vector store).
   Value *interleaveShadow(IRBuilder<> &IRB, IntrinsicInst &I) {
     // Call arguments only

>From e7d6a6c4306e1ec37b0373b74b5175caab42b254 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 17 Jul 2024 17:58:40 +0000
Subject: [PATCH 11/14] Remove track-origins from test

---
 .../MemorySanitizer/AArch64/neon_vst.ll       | 1457 ++++-------------
 1 file changed, 288 insertions(+), 1169 deletions(-)

diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
index 3fd7ca05a5608..d7e61e814ba7a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
 ; Test memory sanitizer instrumentation for Arm NEON VST instructions.
 ;
-; RUN: opt < %s -passes=msan -msan-track-origins=2 -S | FileCheck %s
+; RUN: opt < %s -passes=msan -S | FileCheck %s
 ;
 ; Forked from llvm/test/CodeGen/AArch64/arm64-st1.ll
 
@@ -14,31 +14,20 @@ define void @st2_8b(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_memory {
 ; CHECK-LABEL: define void @st2_8b(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
 ; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
-; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <16 x i8> [[TMP7]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP14]], 0
-; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0:![0-9]+]]
-; CHECK:       19:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4:[0-9]+]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       20:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -50,29 +39,19 @@ define void @st2_8b_undefA(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m
 ; CHECK-LABEL: define void @st2_8b_undefA(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[TMP7]], 35184372088832
-; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
-; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
 ; CHECK-NEXT:    store <16 x i8> [[TMP5]], ptr [[TMP8]], align 1
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i64 [[TMP12]], 0
-; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP14]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
-; CHECK:       17:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       7:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       18:
+; CHECK:       8:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -84,26 +63,19 @@ define void @st2_8b_undefB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_m
 ; CHECK-LABEL: define void @st2_8b_undefB(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[TMP7]], 35184372088832
-; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
-; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
 ; CHECK-NEXT:    store <16 x i8> [[TMP5]], ptr [[TMP8]], align 1
-; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP12]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
-; CHECK:       14:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       7:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       15:
+; CHECK:       8:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -115,23 +87,17 @@ define void @st2_8b_undefAB(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_
 ; CHECK-LABEL: define void @st2_8b_undefAB(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
 ; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
-; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
-; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, ptr [[TMP5]], align 1
-; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
-; CHECK:       11:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; CHECK:       5:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       12:
+; CHECK:       6:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -143,13 +109,9 @@ define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sani
 ; CHECK-LABEL: define void @st3_8b(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i8> [[TMP5]], <8 x i8> undef, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -157,24 +119,13 @@ define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind sani
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -186,11 +137,8 @@ define void @st3_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 ; CHECK-LABEL: define void @st3_8b_undefA(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <8 x i8> [[TMP3]], <8 x i8> undef, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -198,24 +146,13 @@ define void @st3_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 ; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP9]], ptr [[TMP12]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP20:%.*]] = icmp ne i64 [[TMP19]], 0
-; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP4]], i32 [[TMP18]]
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP21]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP24:%.*]], label [[TMP25:%.*]], !prof [[PROF0]]
-; CHECK:       24:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       10:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       25:
+; CHECK:       11:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -227,11 +164,8 @@ define void @st3_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 ; CHECK-LABEL: define void @st3_8b_undefB(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <8 x i8> [[TMP3]], <8 x i8> undef, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -239,21 +173,13 @@ define void @st3_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 ; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP9]], ptr [[TMP12]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
-; CHECK:       21:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       10:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       22:
+; CHECK:       11:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -265,32 +191,21 @@ define void @st3_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwi
 ; CHECK-LABEL: define void @st3_8b_undefC(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> <i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef>, <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
 ; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
 ; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 35184372088832
-; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
-; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP8]], ptr [[TMP11]], align 1
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP15]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP18:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP6]], i32 [[TMP17]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
-; CHECK:       20:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
+; CHECK:       9:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       21:
+; CHECK:       10:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -302,30 +217,20 @@ define void @st3_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 ; CHECK-LABEL: define void @st3_8b_undefAB(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> undef, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> [[TMP5]], <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
-; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -337,30 +242,20 @@ define void @st3_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 ; CHECK-LABEL: define void @st3_8b_undefAC(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> <i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef>, <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
-; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -372,27 +267,20 @@ define void @st3_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounw
 ; CHECK-LABEL: define void @st3_8b_undefBC(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> <i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef, i8 -1, i8 undef>, <24 x i32> <i32 0, i32 1, i32 8, i32 2, i32 3, i32 10, i32 4, i32 5, i32 12, i32 6, i32 7, i32 14, i32 8, i32 9, i32 16, i32 10, i32 11, i32 18, i32 12, i32 13, i32 20, i32 14, i32 15, i32 22>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <24 x i8> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
-; CHECK:       15:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       16:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -404,23 +292,17 @@ define void @st3_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) noun
 ; CHECK-LABEL: define void @st3_8b_undefABC(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
 ; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
-; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
-; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <24 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, ptr [[TMP5]], align 1
-; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
-; CHECK:       11:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; CHECK:       5:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       12:
+; CHECK:       6:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -432,15 +314,10 @@ define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P)
 ; CHECK-LABEL: define void @st4_8b(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <8 x i8> [[TMP5]], <8 x i8> [[TMP7]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -448,27 +325,13 @@ define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P)
 ; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
 ; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
-; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
-; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP13]], ptr [[TMP16]], align 1
-; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP20]], 0
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP23]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <8 x i8> [[TMP7]] to i64
-; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP26]], 0
-; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
-; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
-; CHECK:       31:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       32:
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -480,13 +343,9 @@ define void @st4_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-LABEL: define void @st4_8b_undefA(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i8> [[TMP3]], <8 x i8> [[TMP5]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -494,27 +353,13 @@ define void @st4_8b_undefA(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP4]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP25:%.*]] = icmp ne i64 [[TMP24]], 0
-; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP25]], i32 [[TMP6]], i32 [[TMP23]]
-; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP26]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP29:%.*]], label [[TMP30:%.*]], !prof [[PROF0]]
-; CHECK:       29:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       30:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -526,13 +371,9 @@ define void @st4_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-LABEL: define void @st4_8b_undefB(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i8> [[TMP3]], <8 x i8> [[TMP5]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -540,24 +381,13 @@ define void @st4_8b_undefB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -569,13 +399,9 @@ define void @st4_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-LABEL: define void @st4_8b_undefC(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP5]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -583,24 +409,13 @@ define void @st4_8b_undefC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -612,13 +427,9 @@ define void @st4_8b_undefD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-LABEL: define void @st4_8b_undefD(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i8> [[TMP5]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -626,24 +437,13 @@ define void @st4_8b_undefD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, p
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i8> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> [[B]], <8 x i8> [[C]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -655,35 +455,21 @@ define void @st4_8b_undefAB(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefAB(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> [[TMP7]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
 ; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
 ; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 35184372088832
-; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
-; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP8]], ptr [[TMP11]], align 1
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP15]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP17]]
-; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP6]], i32 [[TMP20]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP23:%.*]], label [[TMP24:%.*]], !prof [[PROF0]]
-; CHECK:       23:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
+; CHECK:       9:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       24:
+; CHECK:       10:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> [[C]], <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -695,11 +481,8 @@ define void @st4_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefAC(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -707,24 +490,13 @@ define void @st4_8b_undefAC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP9]], ptr [[TMP12]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP20:%.*]] = icmp ne i64 [[TMP19]], 0
-; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP4]], i32 [[TMP18]]
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP21]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP24:%.*]], label [[TMP25:%.*]], !prof [[PROF0]]
-; CHECK:       24:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       10:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       25:
+; CHECK:       11:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -736,11 +508,8 @@ define void @st4_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefBC(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -748,21 +517,13 @@ define void @st4_8b_undefBC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP9]], ptr [[TMP12]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
-; CHECK:       21:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       10:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       22:
+; CHECK:       11:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -774,11 +535,8 @@ define void @st4_8b_undefBD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefBD(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <8 x i8> [[TMP3]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -786,21 +544,13 @@ define void @st4_8b_undefBD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP9]], ptr [[TMP12]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x i8> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
-; CHECK:       21:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       10:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       22:
+; CHECK:       11:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> [[C]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -812,30 +562,20 @@ define void @st4_8b_undefABC(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefABC(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> [[TMP5]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
-; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> undef, <8 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -847,30 +587,20 @@ define void @st4_8b_undefABD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefABD(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> [[TMP5]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
-; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> [[C]], <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -882,30 +612,20 @@ define void @st4_8b_undefACD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefACD(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> [[TMP1]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
-; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i64 [[TMP13]], 0
-; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> [[B]], <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -917,27 +637,20 @@ define void @st4_8b_undefBCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D,
 ; CHECK-LABEL: define void @st4_8b_undefBCD(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
-; CHECK:       15:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       16:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> [[A]], <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -949,23 +662,17 @@ define void @st4_8b_undefABCD(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D
 ; CHECK-LABEL: define void @st4_8b_undefABCD(
 ; CHECK-SAME: <8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]], <8 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
 ; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
-; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
-; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, ptr [[TMP5]], align 1
-; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
-; CHECK:       11:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; CHECK:       5:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       12:
+; CHECK:       6:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> undef, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -983,31 +690,20 @@ define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind sanitize_memor
 ; CHECK-LABEL: define void @st2_16b(
 ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP3]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
 ; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
 ; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
-; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <32 x i8> [[TMP7]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i128 [[TMP14]], 0
-; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
-; CHECK:       19:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       20:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> [[A]], <16 x i8> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1019,13 +715,9 @@ define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind
 ; CHECK-LABEL: define void @st3_16b(
 ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP3]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> undef, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
@@ -1033,24 +725,13 @@ define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <48 x i8> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <16 x i8> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> [[A]], <16 x i8> [[B]], <16 x i8> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1062,15 +743,10 @@ define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr
 ; CHECK-LABEL: define void @st4_16b(
 ; CHECK-SAME: <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i8> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP3]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
 ; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> [[TMP7]], <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
@@ -1078,27 +754,13 @@ define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr
 ; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
 ; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
-; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
-; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <64 x i8> [[TMP13]], ptr [[TMP16]], align 1
-; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <16 x i8> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i128 [[TMP20]], 0
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <16 x i8> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i128 [[TMP23]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <16 x i8> [[TMP7]] to i128
-; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i128 [[TMP26]], 0
-; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
-; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
-; CHECK:       31:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       32:
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[A]], <16 x i8> [[B]], <16 x i8> [[C]], <16 x i8> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1116,31 +778,20 @@ define void @st2_4h(<4 x i16> %A, <4 x i16> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_4h(
 ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
 ; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
-; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <8 x i16> [[TMP7]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <4 x i16> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP14]], 0
-; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
-; CHECK:       19:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       20:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> [[A]], <4 x i16> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1152,13 +803,9 @@ define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_4h(
 ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x i16> [[TMP5]], <4 x i16> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
@@ -1166,24 +813,13 @@ define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <12 x i16> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x i16> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <4 x i16> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> [[A]], <4 x i16> [[B]], <4 x i16> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1195,15 +831,10 @@ define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr
 ; CHECK-LABEL: define void @st4_4h(
 ; CHECK-SAME: <4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], <4 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i16> [[TMP5]], <4 x i16> [[TMP7]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
@@ -1211,27 +842,13 @@ define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr
 ; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
 ; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
-; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
-; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <16 x i16> [[TMP13]], ptr [[TMP16]], align 1
-; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x i16> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP20]], 0
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <4 x i16> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP23]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <4 x i16> [[TMP7]] to i64
-; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP26]], 0
-; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
-; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
-; CHECK:       31:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       32:
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> [[A]], <4 x i16> [[B]], <4 x i16> [[C]], <4 x i16> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1249,31 +866,20 @@ define void @st2_8h(<8 x i16> %A, <8 x i16> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_8h(
 ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
 ; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
-; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <16 x i16> [[TMP7]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i128 [[TMP14]], 0
-; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
-; CHECK:       19:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       20:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> [[A]], <8 x i16> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1285,13 +891,9 @@ define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_8h(
 ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <8 x i16> [[TMP5]], <8 x i16> undef, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -1299,24 +901,13 @@ define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <24 x i16> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <8 x i16> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> [[A]], <8 x i16> [[B]], <8 x i16> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1328,15 +919,10 @@ define void @st4_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr
 ; CHECK-LABEL: define void @st4_8h(
 ; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i16> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP3]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
 ; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <8 x i16> [[TMP5]], <8 x i16> [[TMP7]], <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
@@ -1344,27 +930,13 @@ define void @st4_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr
 ; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
 ; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
-; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
-; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <32 x i16> [[TMP13]], ptr [[TMP16]], align 1
-; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x i16> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i128 [[TMP20]], 0
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <8 x i16> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i128 [[TMP23]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <8 x i16> [[TMP7]] to i128
-; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i128 [[TMP26]], 0
-; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
-; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
-; CHECK:       31:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       32:
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> [[A]], <8 x i16> [[B]], <8 x i16> [[C]], <8 x i16> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1382,31 +954,20 @@ define void @st2_2s(<2 x i32> %A, <2 x i32> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_2s(
 ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
 ; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
-; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <4 x i32> [[TMP7]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <2 x i32> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP14]], 0
-; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
-; CHECK:       19:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       20:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> [[A]], <2 x i32> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1418,13 +979,9 @@ define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_2s(
 ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i32> [[TMP5]], <2 x i32> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -1432,24 +989,13 @@ define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <6 x i32> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i32> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i32> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> [[A]], <2 x i32> [[B]], <2 x i32> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1461,15 +1007,10 @@ define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr
 ; CHECK-LABEL: define void @st4_2s(
 ; CHECK-SAME: <2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> [[C:%.*]], <2 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load <2 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <2 x i32> [[TMP5]], <2 x i32> [[TMP7]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -1477,27 +1018,13 @@ define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr
 ; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
 ; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
-; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
-; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <8 x i32> [[TMP13]], ptr [[TMP16]], align 1
-; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <2 x i32> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP20]], 0
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <2 x i32> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP23]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <2 x i32> [[TMP7]] to i64
-; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP26]], 0
-; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
-; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
-; CHECK:       31:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       32:
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> [[A]], <2 x i32> [[B]], <2 x i32> [[C]], <2 x i32> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1513,31 +1040,20 @@ define void @st2_4s(<4 x i32> %A, <4 x i32> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_4s(
 ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
 ; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
-; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <8 x i32> [[TMP7]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i128 [[TMP14]], 0
-; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
-; CHECK:       19:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       20:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> [[A]], <4 x i32> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1549,13 +1065,9 @@ define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_4s(
 ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
@@ -1563,24 +1075,13 @@ define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <12 x i32> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <4 x i32> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> [[A]], <4 x i32> [[B]], <4 x i32> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1592,15 +1093,10 @@ define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr
 ; CHECK-LABEL: define void @st4_4s(
 ; CHECK-SAME: <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i32> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> [[TMP7]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
@@ -1608,27 +1104,13 @@ define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr
 ; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
 ; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
-; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
-; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <16 x i32> [[TMP13]], ptr [[TMP16]], align 1
-; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i128 [[TMP20]], 0
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <4 x i32> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i128 [[TMP23]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <4 x i32> [[TMP7]] to i128
-; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i128 [[TMP26]], 0
-; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
-; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
-; CHECK:       31:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       32:
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> [[A]], <4 x i32> [[B]], <4 x i32> [[C]], <4 x i32> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1647,31 +1129,20 @@ define void @st2_1d(<1 x i64> %A, <1 x i64> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_1d(
 ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP3]], <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
 ; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
-; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <2 x i64> [[TMP7]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <1 x i64> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP14]], 0
-; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
-; CHECK:       19:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       20:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> [[A]], <1 x i64> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1683,13 +1154,9 @@ define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_1d(
 ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP3]], <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <1 x i64> [[TMP5]], <1 x i64> undef, <2 x i32> <i32 0, i32 1>
@@ -1697,24 +1164,13 @@ define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <3 x i64> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <1 x i64> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <1 x i64> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> [[A]], <1 x i64> [[B]], <1 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1726,15 +1182,10 @@ define void @st4_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr
 ; CHECK-LABEL: define void @st4_1d(
 ; CHECK-SAME: <1 x i64> [[A:%.*]], <1 x i64> [[B:%.*]], <1 x i64> [[C:%.*]], <1 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load <1 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP3]], <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <1 x i64> [[TMP5]], <1 x i64> [[TMP7]], <2 x i32> <i32 0, i32 1>
@@ -1742,27 +1193,13 @@ define void @st4_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr
 ; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
 ; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
-; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
-; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <4 x i64> [[TMP13]], ptr [[TMP16]], align 1
-; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <1 x i64> [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP20]], 0
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <1 x i64> [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP23]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <1 x i64> [[TMP7]] to i64
-; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP26]], 0
-; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
-; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
-; CHECK:       31:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       32:
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> [[A]], <1 x i64> [[B]], <1 x i64> [[C]], <1 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1780,31 +1217,20 @@ define void @st2_2d(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize_memory
 ; CHECK-LABEL: define void @st2_2d(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 193514046488576
 ; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 35184372088832
-; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
 ; CHECK-NEXT:    store <4 x i64> [[TMP7]], ptr [[TMP10]], align 1
-; CHECK-NEXT:    [[TMP14:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i128 [[TMP14]], 0
-; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP6]], i32 [[TMP16]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
-; CHECK:       19:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       20:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1816,29 +1242,19 @@ define void @st2_2d_undefA(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize
 ; CHECK-LABEL: define void @st2_2d_undefA(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[TMP7]], 35184372088832
-; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
-; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
 ; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr [[TMP8]], align 1
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i128 [[TMP12]], 0
-; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP15]], i32 [[TMP4]], i32 [[TMP14]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
-; CHECK:       17:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       7:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       18:
+; CHECK:       8:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1850,26 +1266,19 @@ define void @st2_2d_undefB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitize
 ; CHECK-LABEL: define void @st2_2d_undefB(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 193514046488576
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[TMP7]], 35184372088832
-; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4
-; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
 ; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr [[TMP8]], align 1
-; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP12]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
-; CHECK:       14:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       7:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       15:
+; CHECK:       8:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1881,23 +1290,17 @@ define void @st2_2d_undefAB(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind sanitiz
 ; CHECK-LABEL: define void @st2_2d_undefAB(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
 ; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
-; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
-; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, ptr [[TMP5]], align 1
-; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
-; CHECK:       11:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; CHECK:       5:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       12:
+; CHECK:       6:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1909,13 +1312,9 @@ define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind s
 ; CHECK-LABEL: define void @st3_2d(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -1923,24 +1322,13 @@ define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind s
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1952,11 +1340,8 @@ define void @st3_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 ; CHECK-LABEL: define void @st3_2d_undefA(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -1964,24 +1349,13 @@ define void @st3_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 ; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP9]], ptr [[TMP12]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP20:%.*]] = icmp ne i128 [[TMP19]], 0
-; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP4]], i32 [[TMP18]]
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP21]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP24:%.*]], label [[TMP25:%.*]], !prof [[PROF0]]
-; CHECK:       24:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       10:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       25:
+; CHECK:       11:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -1993,11 +1367,8 @@ define void @st3_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 ; CHECK-LABEL: define void @st3_2d_undefB(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -2005,21 +1376,13 @@ define void @st3_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 ; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP9]], ptr [[TMP12]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
-; CHECK:       21:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       10:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       22:
+; CHECK:       11:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2031,32 +1394,21 @@ define void @st3_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nou
 ; CHECK-LABEL: define void @st3_2d_undefC(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> <i64 -1, i64 undef, i64 -1, i64 undef>, <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
 ; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
 ; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 35184372088832
-; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
-; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP8]], ptr [[TMP11]], align 1
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i128 [[TMP15]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP18:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP6]], i32 [[TMP17]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
-; CHECK:       20:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
+; CHECK:       9:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       21:
+; CHECK:       10:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2068,30 +1420,20 @@ define void @st3_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 ; CHECK-LABEL: define void @st3_2d_undefAB(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> [[TMP5]], <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0
-; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> [[C]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2103,30 +1445,20 @@ define void @st3_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 ; CHECK-LABEL: define void @st3_2d_undefAC(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> <i64 -1, i64 undef, i64 -1, i64 undef>, <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0
-; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2138,27 +1470,20 @@ define void @st3_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) no
 ; CHECK-LABEL: define void @st3_2d_undefBC(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> <i64 -1, i64 undef, i64 -1, i64 undef>, <6 x i32> <i32 0, i32 1, i32 2, i32 2, i32 3, i32 4>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <6 x i64> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
-; CHECK:       15:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       16:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2170,23 +1495,17 @@ define void @st3_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) n
 ; CHECK-LABEL: define void @st3_2d_undefABC(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
 ; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
-; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
-; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <6 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, ptr [[TMP5]], align 1
-; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
-; CHECK:       11:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; CHECK:       5:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       12:
+; CHECK:       6:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2198,15 +1517,10 @@ define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr
 ; CHECK-LABEL: define void @st4_2d(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> [[TMP7]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -2214,27 +1528,13 @@ define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr
 ; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 193514046488576
 ; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 35184372088832
-; CHECK-NEXT:    [[TMP18:%.*]] = and i64 [[TMP17]], -4
-; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP13]], ptr [[TMP16]], align 1
-; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i128 [[TMP20]], 0
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP23:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i128 [[TMP23]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP6]], i32 [[TMP22]]
-; CHECK-NEXT:    [[TMP26:%.*]] = bitcast <2 x i64> [[TMP7]] to i128
-; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i128 [[TMP26]], 0
-; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP25]]
-; CHECK-NEXT:    [[TMP29:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    [[TMP30:%.*]] = select i1 [[TMP29]], i32 [[TMP10]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP31:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
-; CHECK:       31:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP10]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP17:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
+; CHECK:       12:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       32:
+; CHECK:       13:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2250,13 +1550,9 @@ define void @st4_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-LABEL: define void @st4_2d_undefA(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> [[TMP5]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -2264,27 +1560,13 @@ define void @st4_2d_undefA(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP4]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP25:%.*]] = icmp ne i128 [[TMP24]], 0
-; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP25]], i32 [[TMP6]], i32 [[TMP23]]
-; CHECK-NEXT:    [[TMP27:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP28:%.*]] = select i1 [[TMP27]], i32 [[TMP8]], i32 [[TMP26]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP29:%.*]], label [[TMP30:%.*]], !prof [[PROF0]]
-; CHECK:       29:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       30:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2296,13 +1578,9 @@ define void @st4_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-LABEL: define void @st4_2d_undefB(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> [[TMP5]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -2310,24 +1588,13 @@ define void @st4_2d_undefB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2339,13 +1606,9 @@ define void @st4_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-LABEL: define void @st4_2d_undefC(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP5]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -2353,24 +1616,13 @@ define void @st4_2d_undefC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2382,13 +1634,9 @@ define void @st4_2d_undefD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-LABEL: define void @st4_2d_undefD(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -2396,24 +1644,13 @@ define void @st4_2d_undefD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %
 ; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 193514046488576
 ; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 35184372088832
-; CHECK-NEXT:    [[TMP16:%.*]] = and i64 [[TMP15]], -4
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP11]], ptr [[TMP14]], align 1
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <2 x i64> [[TMP5]] to i128
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i128 [[TMP21]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP20]]
-; CHECK-NEXT:    [[TMP24:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    [[TMP25:%.*]] = select i1 [[TMP24]], i32 [[TMP8]], i32 [[TMP23]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP7]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP26:%.*]], label [[TMP27:%.*]], !prof [[PROF0]]
-; CHECK:       26:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP8]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; CHECK:       11:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       27:
+; CHECK:       12:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2425,35 +1662,21 @@ define void @st4_2d_undefAB(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefAB(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> [[TMP7]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
 ; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 35184372088832
-; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
-; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP8]], ptr [[TMP11]], align 1
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i128 [[TMP15]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i128 [[TMP18]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP4]], i32 [[TMP17]]
-; CHECK-NEXT:    [[TMP21:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP21]], i32 [[TMP6]], i32 [[TMP20]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP23:%.*]], label [[TMP24:%.*]], !prof [[PROF0]]
-; CHECK:       23:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
+; CHECK:       9:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       24:
+; CHECK:       10:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> [[C]], <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2465,11 +1688,8 @@ define void @st4_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefAC(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -2477,24 +1697,13 @@ define void @st4_2d_undefAC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP20:%.*]] = icmp ne i128 [[TMP19]], 0
-; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP4]], i32 [[TMP18]]
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP21]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP24:%.*]], label [[TMP25:%.*]], !prof [[PROF0]]
-; CHECK:       24:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       10:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       25:
+; CHECK:       11:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2506,11 +1715,8 @@ define void @st4_2d_undefAD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefAD(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -2518,24 +1724,13 @@ define void @st4_2d_undefAD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP19:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP20:%.*]] = icmp ne i128 [[TMP19]], 0
-; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP20]], i32 [[TMP4]], i32 [[TMP18]]
-; CHECK-NEXT:    [[TMP22:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP6]], i32 [[TMP21]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP24:%.*]], label [[TMP25:%.*]], !prof [[PROF0]]
-; CHECK:       24:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       10:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       25:
+; CHECK:       11:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2547,11 +1742,8 @@ define void @st4_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefBC(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -2559,21 +1751,13 @@ define void @st4_2d_undefBC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
-; CHECK:       21:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       10:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       22:
+; CHECK:       11:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2585,11 +1769,8 @@ define void @st4_2d_undefBD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefBD(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -2597,21 +1778,13 @@ define void @st4_2d_undefBD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 193514046488576
 ; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 35184372088832
-; CHECK-NEXT:    [[TMP14:%.*]] = and i64 [[TMP13]], -4
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP9]], ptr [[TMP12]], align 1
-; CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i128 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP17]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP6]], i32 [[TMP18]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP21:%.*]], label [[TMP22:%.*]], !prof [[PROF0]]
-; CHECK:       21:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
+; CHECK:       10:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       22:
+; CHECK:       11:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2623,32 +1796,21 @@ define void @st4_2d_undefCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefCD(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 193514046488576
 ; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 35184372088832
-; CHECK-NEXT:    [[TMP13:%.*]] = and i64 [[TMP12]], -4
-; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP8]], ptr [[TMP11]], align 1
-; CHECK-NEXT:    [[TMP15:%.*]] = bitcast <2 x i64> [[TMP3]] to i128
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i128 [[TMP15]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP18:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP6]], i32 [[TMP17]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP20:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
-; CHECK:       20:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP12:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
+; CHECK:       9:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       21:
+; CHECK:       10:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> [[B]], <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2660,30 +1822,20 @@ define void @st4_2d_undefABC(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefABC(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 48) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 48) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> [[TMP5]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0
-; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> undef, <2 x i64> [[D]], ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2695,30 +1847,20 @@ define void @st4_2d_undefABD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefABD(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> [[TMP5]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0
-; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> [[C]], <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2730,30 +1872,20 @@ define void @st4_2d_undefACD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefACD(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> <i64 -1, i64 -1>, <2 x i64> [[TMP1]], <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
-; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0
-; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP2]], i32 0
-; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP4]], i32 [[TMP15]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
-; CHECK:       18:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       19:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> [[B]], <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2765,27 +1897,20 @@ define void @st4_2d_undefBCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64>
 ; CHECK-LABEL: define void @st4_2d_undefBCD(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
 ; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 193514046488576
 ; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 35184372088832
-; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
-; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
 ; CHECK-NEXT:    store <8 x i64> [[TMP6]], ptr [[TMP9]], align 1
-; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP4]], i32 [[TMP2]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP15:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
-; CHECK:       15:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       16:
+; CHECK:       9:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> [[A]], <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;
@@ -2797,23 +1922,17 @@ define void @st4_2d_undefABCD(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64
 ; CHECK-LABEL: define void @st4_2d_undefABCD(
 ; CHECK-SAME: <2 x i64> [[A:%.*]], <2 x i64> [[B:%.*]], <2 x i64> [[C:%.*]], <2 x i64> [[D:%.*]], ptr [[P:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 64) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 193514046488576
 ; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 35184372088832
-; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
-; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    store <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, ptr [[TMP5]], align 1
-; CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
-; CHECK:       11:
-; CHECK-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; CHECK:       5:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
-; CHECK:       12:
+; CHECK:       6:
 ; CHECK-NEXT:    call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> undef, <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, ptr [[P]])
 ; CHECK-NEXT:    ret void
 ;

>From 39b39af0b26b39b55f2ef0b8603df58cf1880e6a Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 17 Jul 2024 18:07:33 +0000
Subject: [PATCH 12/14] Use arg_size() instead of (getNumOperands() - 1)

---
 .../Transforms/Instrumentation/MemorySanitizer.cpp    | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 8ec41edc2646f..67b263fb0e498 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3923,10 +3923,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   /// Calculates the shadow for interleaving 2, 3 or 4 vectors
   /// (e.g., for Arm NEON vector store).
   Value *interleaveShadow(IRBuilder<> &IRB, IntrinsicInst &I) {
-    // Call arguments only
-    int numArgOperands = I.getNumOperands() - 1;
+    // Don't use getNumOperands() because it includes the callee
+    int numArgOperands = I.arg_size();
     assert(numArgOperands >= 1);
 
+    // The last arg operand is the output
     int numVectors = numArgOperands - 1;
 
     for (int i = 0; i < numVectors; i++) {
@@ -3969,9 +3970,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
     Value *interleavedShadow = interleaveShadow(IRB, I);
 
-    // Call arguments only
-    int numArgOperands = I.getNumOperands() - 1;
+    // Don't use getNumOperands() because it includes the callee
+    int numArgOperands = I.arg_size();
     assert(numArgOperands >= 1);
+
+    // The last arg operand is the output
     Value *Addr = I.getArgOperand(numArgOperands - 1);
 
     Value *ShadowPtr, *OriginPtr;

>From 1970568ab31879094666b20d6f09b39682c20864 Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 17 Jul 2024 19:20:29 +0000
Subject: [PATCH 13/14] Revert setOriginForNaryOp

---
 .../Instrumentation/MemorySanitizer.cpp       | 24 ++++++-------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 67b263fb0e498..3652b4561f8b6 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2505,21 +2505,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   using OriginCombiner = Combiner<false>;
 
   /// Propagate origin for arbitrary operation.
-  ///
-  /// Optionally skips n trailing operands.
-  void setOriginForNaryOp(Instruction &I, unsigned int skipLastOperands = 0) {
+  void setOriginForNaryOp(Instruction &I) {
     if (!MS.TrackOrigins)
       return;
     IRBuilder<> IRB(&I);
     OriginCombiner OC(this, IRB);
-
-    if (skipLastOperands > 0)
-      assert((I.getNumOperands() > skipLastOperands) &&
-             "Insufficient number of operands to skip!");
-
-    for (unsigned int i = 0; i < I.getNumOperands() - skipLastOperands; i++)
-      OC.Add(I.getOperand(i));
-
+    for (Use &Op : I.operands())
+      OC.Add(Op.get());
     OC.Done(&I);
   }
 
@@ -3983,12 +3975,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     IRB.CreateAlignedStore(interleavedShadow, ShadowPtr, Align(1));
 
     if (MS.TrackOrigins) {
-      // We don't use the last two operands to compute the origin, because:
-      // - the last operand is the callee
-      //   e.g., 'declare void @llvm.aarch64.neon.st2.v8i16.p0(...'
-      // - the second-last operand is the return value
-      //   e.g., '%arraydecay74 = getelementptr inbounds ...'
-      setOriginForNaryOp(I, 2);
+      OriginCombiner OC(this, IRB);
+      for (int i = 0; i < numArgOperands - 1; i++)
+        OC.Add(I.getOperand(i));
+      OC.Done(&I);
     }
 
     if (ClCheckAccessAddress)

>From 1cebde812e77dadbe38a39cd21878a79605e409c Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Wed, 17 Jul 2024 19:50:40 +0000
Subject: [PATCH 14/14] Move pointer check earlier

---
 llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp      | 5 ++---
 .../test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll | 2 +-
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 3652b4561f8b6..49395641f2155 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3968,6 +3968,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
     // The last arg operand is the output
     Value *Addr = I.getArgOperand(numArgOperands - 1);
+    if (ClCheckAccessAddress)
+      insertShadowCheck(Addr, &I);
 
     Value *ShadowPtr, *OriginPtr;
     std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
@@ -3980,9 +3982,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         OC.Add(I.getOperand(i));
       OC.Done(&I);
     }
-
-    if (ClCheckAccessAddress)
-      insertShadowCheck(Addr, &I);
   }
 
   void visitIntrinsicInst(IntrinsicInst &I) {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
index d7e61e814ba7a..b18e8395fbc49 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/neon_vst.ll
@@ -23,7 +23,7 @@ define void @st2_8b(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind sanitize_memory {
 ; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
 ; CHECK-NEXT:    store <16 x i8> [[TMP7]], ptr [[TMP10]], align 1
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
-; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP19:%.*]], label [[TMP20:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0:![0-9]+]]
 ; CHECK:       8:
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
 ; CHECK-NEXT:    unreachable



More information about the cfe-commits mailing list