[llvm] 21c3df4 - MemorySanitizer: Convert tests to opaque pointers

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 27 18:24:35 PST 2022


Author: Matt Arsenault
Date: 2022-11-27T21:24:24-05:00
New Revision: 21c3df4b858476bbc9b2b5af96cfef41f5715715

URL: https://github.com/llvm/llvm-project/commit/21c3df4b858476bbc9b2b5af96cfef41f5715715
DIFF: https://github.com/llvm/llvm-project/commit/21c3df4b858476bbc9b2b5af96cfef41f5715715.diff

LOG: MemorySanitizer: Convert tests to opaque pointers

Required fixups:
  alloca.ll
  avx2-intrinsics-x86.ll
  csr.ll
  instrumentation-with-call-threshold.ll
  msan_asm_conservative.ll
  msan_basic.ll
  msan_debug_info.ll
  msan_kernel_basic.ll
  msan_x86intrinsics.ll
  mul_by_constant.ll

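The conversion follows the usual mechanical pattern: pointee-typed pointers
(i8*, i32*, %struct.__va_list*, ...) become plain ptr, typed intrinsic
mangling such as .p0i8 becomes .p0, and the no-op bitcasts that existed only
to feed i8* into the lifetime/va_start intrinsics fold into direct uses of
the alloca. A minimal before/after sketch, taken from the AArch64 vararg
test below:

  ; Before (typed pointers): a bitcast to i8* is required.
  %vl = alloca %struct.__va_list, align 8
  %1 = bitcast %struct.__va_list* %vl to i8*
  call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)

  ; After (opaque pointers): the intrinsic takes the alloca directly.
  %vl = alloca %struct.__va_list, align 8
  call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
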
Added: 
    

Modified: 
    llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
    llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
    llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
    llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
    llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
    llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
    llvm/test/Instrumentation/MemorySanitizer/X86/vararg.ll
    llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
    llvm/test/Instrumentation/MemorySanitizer/alloca.ll
    llvm/test/Instrumentation/MemorySanitizer/atomics.ll
    llvm/test/Instrumentation/MemorySanitizer/attributes.ll
    llvm/test/Instrumentation/MemorySanitizer/avx-intrinsics-x86.ll
    llvm/test/Instrumentation/MemorySanitizer/avx2-intrinsics-x86.ll
    llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll
    llvm/test/Instrumentation/MemorySanitizer/byval.ll
    llvm/test/Instrumentation/MemorySanitizer/check-array.ll
    llvm/test/Instrumentation/MemorySanitizer/check-constant-shadow.ll
    llvm/test/Instrumentation/MemorySanitizer/check-struct.ll
    llvm/test/Instrumentation/MemorySanitizer/check_access_address.ll
    llvm/test/Instrumentation/MemorySanitizer/clmul.ll
    llvm/test/Instrumentation/MemorySanitizer/csr.ll
    llvm/test/Instrumentation/MemorySanitizer/disambiguate-origin.ll
    llvm/test/Instrumentation/MemorySanitizer/freeze.ll
    llvm/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll
    llvm/test/Instrumentation/MemorySanitizer/libatomic.ll
    llvm/test/Instrumentation/MemorySanitizer/manual-shadow.ll
    llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_llvm_strip_invariant.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
    llvm/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
    llvm/test/Instrumentation/MemorySanitizer/mul_by_constant.ll
    llvm/test/Instrumentation/MemorySanitizer/no-check-rt-unaligned.ll
    llvm/test/Instrumentation/MemorySanitizer/nosanitize.ll
    llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll
    llvm/test/Instrumentation/MemorySanitizer/origin-alignment.ll
    llvm/test/Instrumentation/MemorySanitizer/origin-array.ll
    llvm/test/Instrumentation/MemorySanitizer/reduce.ll
    llvm/test/Instrumentation/MemorySanitizer/sse-intrinsics-x86.ll
    llvm/test/Instrumentation/MemorySanitizer/sse2-intrinsics-x86.ll
    llvm/test/Instrumentation/MemorySanitizer/sse41-intrinsics-x86.ll
    llvm/test/Instrumentation/MemorySanitizer/store-long-origin.ll
    llvm/test/Instrumentation/MemorySanitizer/store-origin.ll
    llvm/test/Instrumentation/MemorySanitizer/str-nobuiltin.ll
    llvm/test/Instrumentation/MemorySanitizer/unreachable.ll
    llvm/test/Instrumentation/MemorySanitizer/unsized_type.ll
    llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
index f108afe97e717..0bd0968b0f1a2 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
@@ -3,15 +3,14 @@
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-linux-gnu"
 
-%struct.__va_list = type { i8*, i8*, i8*, i32, i32 }
+%struct.__va_list = type { ptr, ptr, ptr, i32, i32 }
 
 define i32 @foo(i32 %guard, ...) {
   %vl = alloca %struct.__va_list, align 8
-  %1 = bitcast %struct.__va_list* %vl to i8*
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
-  call void @llvm.va_start(i8* %1)
-  call void @llvm.va_end(i8* %1)
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
+  call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+  call void @llvm.va_start(ptr %vl)
+  call void @llvm.va_end(ptr %vl)
+  call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
   ret i32 0
 }
 
@@ -30,26 +29,26 @@ define i32 @foo(i32 %guard, ...) {
 ; Propagate the GR shadow values for the va_list::__gp_top area, adjust the
 ; offset in __msan_va_arg_tls based on va_list::__gp_off, and finally
 ; issue the memcpy.
-; CHECK: [[GRP:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i64 {{%.*}}
+; CHECK: [[GRP:%.*]] = getelementptr inbounds i8, ptr {{%.*}}, i64 {{%.*}}
 ; CHECK: [[GRSIZE:%.*]] = sub i64 64, {{%.*}}
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 {{%.*}}, i8* align 8 [[GRP]], i64 [[GRSIZE]], i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 [[GRP]], i64 [[GRSIZE]], i1 false)
 
 ; Propagate the VR shadow values for the va_list::__vr_top area, adjust the
 ; offset in __msan_va_arg_tls based on va_list::__vr_off, and finally
 ; issue the memcpy.
-; CHECK: [[VRP:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i64 {{%.*}}
+; CHECK: [[VRP:%.*]] = getelementptr inbounds i8, ptr {{%.*}}, i64 {{%.*}}
 ; CHECK: [[VRSIZE:%.*]] = sub i64 128, {{%.*}}
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 {{%.*}}, i8* align 8 [[VRP]], i64 [[VRSIZE]], i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 [[VRP]], i64 [[VRSIZE]], i1 false)
 
 ; Copy the remaining shadow values at the va_list::__stack position (it is
 ; at the constant offset of 192 from __msan_va_arg_tls).
-; CHECK: [[STACK:%.*]] = getelementptr inbounds i8, i8* {{%.*}}, i32 192
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 {{%.*}}, i8* align 16 [[STACK]], i64 {{%.*}}, i1 false)
+; CHECK: [[STACK:%.*]] = getelementptr inbounds i8, ptr {{%.*}}, i32 192
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 16 {{%.*}}, ptr align 16 [[STACK]], i64 {{%.*}}, i1 false)
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.va_start(i8*) #2
-declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 define i32 @bar() {
   %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i32 2, double 3.000000e+00, 
@@ -98,6 +97,6 @@ entry:
 }
 
 ; If the size of __msan_va_arg_tls changes, the second argument of `add` must also be changed.
-; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
 declare i64 @sum(i64 %n, ...)

diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
index ac710db0f4471..85e98b703018e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
@@ -4,12 +4,11 @@ target datalayout = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
 target triple = "mips64--linux"
 
 define i32 @foo(i32 %guard, ...) {
-  %vl = alloca i8*, align 8
-  %1 = bitcast i8** %vl to i8*
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
-  call void @llvm.va_start(i8* %1)
-  call void @llvm.va_end(i8* %1)
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
+  %vl = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+  call void @llvm.va_start(ptr %vl)
+  call void @llvm.va_end(ptr %vl)
+  call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
   ret i32 0
 }
 
@@ -20,12 +19,12 @@ define i32 @foo(i32 %guard, ...) {
 ; CHECK: [[B:%.*]] = add i64 0, [[A]]
 ; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
 
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[C]], i8* align 8 bitcast ({{.*}} @__msan_va_arg_tls to i8*), i64 [[B]], i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[B]], i1 false)
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.va_start(i8*) #2
-declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 define i32 @bar() {
   %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -36,9 +35,9 @@ define i32 @bar() {
 ; array.  The first argument is stored at position 4, since it's right
 ; justified.
 ; CHECK-LABEL: @bar
-; CHECK: store i32 0, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 4) to i32*), align 8
-; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
-; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 16) to i64*), align 8
+; CHECK: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
 ; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check multiple fixed arguments.
@@ -49,8 +48,8 @@ define i32 @bar2() {
 }
 
 ; CHECK-LABEL: @bar2
-; CHECK: store i64 0, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_va_arg_tls, i32 0, i32 0), align 8
-; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
+; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
 ; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
@@ -76,6 +75,6 @@ entry:
 
 ; If the size of __msan_va_arg_tls changes, the second argument of `add` must also be changed.
 ; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
 declare i64 @sum(i64 %n, ...)

diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
index 5bbed6f1cde76..be7ccf00221ef 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
@@ -4,12 +4,11 @@ target datalayout = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
 target triple = "mips64el--linux"
 
 define i32 @foo(i32 %guard, ...) {
-  %vl = alloca i8*, align 8
-  %1 = bitcast i8** %vl to i8*
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
-  call void @llvm.va_start(i8* %1)
-  call void @llvm.va_end(i8* %1)
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
+  %vl = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+  call void @llvm.va_start(ptr %vl)
+  call void @llvm.va_end(ptr %vl)
+  call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
   ret i32 0
 }
 
@@ -20,12 +19,12 @@ define i32 @foo(i32 %guard, ...) {
 ; CHECK: [[B:%.*]] = add i64 0, [[A]]
 ; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
 
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[C]], i8* align 8 bitcast ({{.*}} @__msan_va_arg_tls to i8*), i64 [[B]], i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[B]], i1 false)
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.va_start(i8*) #2
-declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 define i32 @bar() {
   %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -35,9 +34,9 @@ define i32 @bar() {
 ; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
 ; array.
 ; CHECK-LABEL: @bar
-; CHECK: store i32 0, i32* bitcast ([100 x i64]* @__msan_va_arg_tls to i32*), align 8
-; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
-; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 16) to i64*), align 8
+; CHECK: store i32 0, ptr @__msan_va_arg_tls, align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
 ; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check multiple fixed arguments.
@@ -48,8 +47,8 @@ define i32 @bar2() {
 }
 
 ; CHECK-LABEL: @bar2
-; CHECK: store i64 0, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_va_arg_tls, i32 0, i32 0), align 8
-; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
+; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
 ; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
@@ -75,6 +74,6 @@ entry:
 
 ; If the size of __msan_va_arg_tls changes, the second argument of `add` must also be changed.
 ; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
 declare i64 @sum(i64 %n, ...)

diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
index af83a32d51f43..5e9003511a638 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
@@ -4,12 +4,11 @@ target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64--linux"
 
 define i32 @foo(i32 %guard, ...) {
-  %vl = alloca i8*, align 8
-  %1 = bitcast i8** %vl to i8*
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
-  call void @llvm.va_start(i8* %1)
-  call void @llvm.va_end(i8* %1)
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
+  %vl = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+  call void @llvm.va_start(ptr %vl)
+  call void @llvm.va_end(ptr %vl)
+  call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
   ret i32 0
 }
 
@@ -20,12 +19,12 @@ define i32 @foo(i32 %guard, ...) {
 ; CHECK: [[B:%.*]] = add i64 0, [[A]]
 ; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
 
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[C]], i8* align 8 bitcast ({{.*}} @__msan_va_arg_tls to i8*), i64 [[B]], i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[B]], i1 false)
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.va_start(i8*) #2
-declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 define i32 @bar() {
   %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -36,9 +35,9 @@ define i32 @bar() {
 ; array.  The first argument is stored at position 4, since it's right
 ; justified.
 ; CHECK-LABEL: @bar
-; CHECK: store i32 0, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 4) to i32*), align 8
-; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
-; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 16) to i64*), align 8
+; CHECK: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
 ; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check vector argument.
@@ -51,7 +50,7 @@ define i32 @bar2() {
 ; corresponds to offset 8+ of parameter save area - so the offset from
 ; __msan_va_arg_tls is actually misaligned.
 ; CHECK-LABEL: @bar2
-; CHECK: store <2 x i64> zeroinitializer, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to <2 x i64>*), align 8
+; CHECK: store <2 x i64> zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
 ; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check i64 array.
@@ -61,7 +60,7 @@ define i32 @bar4() {
 }
 
 ; CHECK-LABEL: @bar4
-; CHECK: store [2 x i64] zeroinitializer, [2 x i64]* bitcast ([100 x i64]* @__msan_va_arg_tls to [2 x i64]*), align 8
+; CHECK: store [2 x i64] zeroinitializer, ptr @__msan_va_arg_tls, align 8
 ; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check i128 array.
@@ -71,27 +70,27 @@ define i32 @bar5() {
 }
 
 ; CHECK-LABEL: @bar5
-; CHECK: store [2 x i128] zeroinitializer, [2 x i128]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to [2 x i128]*), align 8
+; CHECK: store [2 x i128] zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
 ; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check 8-aligned byval.
-define i32 @bar6([2 x i64]* %arg) {
-  %1 = call i32 (i32, ...) @foo(i32 0, [2 x i64]* byval([2 x i64]) align 8 %arg)
+define i32 @bar6(ptr %arg) {
+  %1 = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 %arg)
   ret i32 %1
 }
 
 ; CHECK-LABEL: @bar6
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 bitcast ([100 x i64]* @__msan_va_arg_tls to i8*), i8* align 8 {{.*}}, i64 16, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 {{.*}}, i64 16, i1 false)
 ; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check 16-aligned byval.
-define i32 @bar7([4 x i64]* %arg) {
-  %1 = call i32 (i32, ...) @foo(i32 0, [4 x i64]* byval([4 x i64]) align 16 %arg)
+define i32 @bar7(ptr %arg) {
+  %1 = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 %arg)
   ret i32 %1
 }
 
 ; CHECK-LABEL: @bar7
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i8*), i8* align 8 {{.*}}, i64 32, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), ptr align 8 {{.*}}, i64 32, i1 false)
 ; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
 
 
@@ -118,6 +117,6 @@ entry:
 
 ; If the size of __msan_va_arg_tls changes, the second argument of `add` must also be changed.
 ; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
 declare i64 @sum(i64 %n, ...)

diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
index 1ecb2680af2b5..70c76a81e1cd0 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
@@ -4,12 +4,11 @@ target datalayout = "e-m:e-i64:64-n32:64"
 target triple = "powerpc64le--linux"
 
 define i32 @foo(i32 %guard, ...) {
-  %vl = alloca i8*, align 8
-  %1 = bitcast i8** %vl to i8*
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
-  call void @llvm.va_start(i8* %1)
-  call void @llvm.va_end(i8* %1)
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
+  %vl = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+  call void @llvm.va_start(ptr %vl)
+  call void @llvm.va_end(ptr %vl)
+  call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
   ret i32 0
 }
 
@@ -20,12 +19,12 @@ define i32 @foo(i32 %guard, ...) {
 ; CHECK: [[B:%.*]] = add i64 0, [[A]]
 ; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
 
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[C]], i8* align 8 bitcast ({{.*}} @__msan_va_arg_tls to i8*), i64 [[B]], i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[B]], i1 false)
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.va_start(i8*) #2
-declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 define i32 @bar() {
   %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
@@ -35,9 +34,9 @@ define i32 @bar() {
 ; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
 ; array.
 ; CHECK-LABEL: @bar
-; CHECK: store i32 0, i32* bitcast ([100 x i64]* @__msan_va_arg_tls to i32*), align 8
-; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i64*), align 8
-; CHECK: store i64 0, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 16) to i64*), align 8
+; CHECK: store i32 0, ptr @__msan_va_arg_tls, align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
 ; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check vector argument.
@@ -50,7 +49,7 @@ define i32 @bar2() {
 ; corresponds to offset 8+ of parameter save area - so the offset from
 ; __msan_va_arg_tls is actually misaligned.
 ; CHECK-LABEL: @bar2
-; CHECK: store <2 x i64> zeroinitializer, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to <2 x i64>*), align 8
+; CHECK: store <2 x i64> zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
 ; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check i64 array.
@@ -60,7 +59,7 @@ define i32 @bar4() {
 }
 
 ; CHECK-LABEL: @bar4
-; CHECK: store [2 x i64] zeroinitializer, [2 x i64]* bitcast ([100 x i64]* @__msan_va_arg_tls to [2 x i64]*), align 8
+; CHECK: store [2 x i64] zeroinitializer, ptr @__msan_va_arg_tls, align 8
 ; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check i128 array.
@@ -70,27 +69,27 @@ define i32 @bar5() {
 }
 
 ; CHECK-LABEL: @bar5
-; CHECK: store [2 x i128] zeroinitializer, [2 x i128]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to [2 x i128]*), align 8
+; CHECK: store [2 x i128] zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
 ; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check 8-aligned byval.
-define i32 @bar6([2 x i64]* %arg) {
-  %1 = call i32 (i32, ...) @foo(i32 0, [2 x i64]* byval([2 x i64]) align 8 %arg)
+define i32 @bar6(ptr %arg) {
+  %1 = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 %arg)
   ret i32 %1
 }
 
 ; CHECK-LABEL: @bar6
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 bitcast ([100 x i64]* @__msan_va_arg_tls to i8*), i8* align 8 {{.*}}, i64 16, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 {{.*}}, i64 16, i1 false)
 ; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Check 16-aligned byval.
-define i32 @bar7([4 x i64]* %arg) {
-  %1 = call i32 (i32, ...) @foo(i32 0, [4 x i64]* byval([4 x i64]) align 16 %arg)
+define i32 @bar7(ptr %arg) {
+  %1 = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 %arg)
   ret i32 %1
 }
 
 ; CHECK-LABEL: @bar7
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 8) to i8*), i8* align 8 {{.*}}, i64 32, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), ptr align 8 {{.*}}, i64 32, i1 false)
 ; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls
 
 ; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
@@ -116,6 +115,6 @@ entry:
 
 ; If the size of __msan_va_arg_tls changes, the second argument of `add` must also be changed.
 ; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
 declare i64 @sum(i64 %n, ...)

diff --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
index 4ac65bf1d7f56..c3da8d7618230 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
@@ -3,15 +3,14 @@
 target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"
 target triple = "s390x-unknown-linux-gnu"
 
-%struct.__va_list = type { i64, i64, i8*, i8* }
+%struct.__va_list = type { i64, i64, ptr, ptr }
 
 define i64 @foo(i64 %guard, ...) {
   %vl = alloca %struct.__va_list, align 8
-  %1 = bitcast %struct.__va_list* %vl to i8*
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* %1)
-  call void @llvm.va_start(i8* %1)
-  call void @llvm.va_end(i8* %1)
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* %1)
+  call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+  call void @llvm.va_start(ptr %vl)
+  call void @llvm.va_end(ptr %vl)
+  call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
   ret i64 0
 }
 
@@ -26,13 +25,13 @@ define i64 @foo(i64 %guard, ...) {
 ; We expect two memcpy operations: one for the register save area, and one for
 ; the overflow arg area.
 
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 {{%.*}}, i8* align 8 {{%.*}}, i64 160, i1 false)
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 {{%.*}}, i8* align 8 {{%.*}}, i64 [[A]], i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 {{%.*}}, i64 160, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 {{%.*}}, i64 [[A]], i1 false)
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.va_start(i8*) #2
-declare void @llvm.va_end(i8*) #2
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 declare i32 @random_i32()
 declare i64 @random_i64()
@@ -119,8 +118,8 @@ entry:
 
 ; If the size of __msan_va_arg_tls changes, the second argument of `add` must also be changed.
 ; CHECK-LABEL: @many_args
-; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
-; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
 
 declare i64 @sum(i64 %n, ...)
 

diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg.ll
index 631f5d4c0c992..eb43ea793b9df 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg.ll
@@ -4,12 +4,11 @@
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
 target triple = "i386-unknown-linux-gnu"
 
-define void @VaStart(i8* %s, ...) {
+define void @VaStart(ptr %s, ...) {
 entry:
-  %vl = alloca i8*, align 4
-  %vl1 = bitcast i8** %vl to i8*
-  call void @llvm.va_start(i8* %vl1)
+  %vl = alloca ptr, align 4
+  call void @llvm.va_start(ptr %vl)
   ret void
 }
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)

diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
index 936cc1e985416..32d43e11fbd9b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
@@ -12,7 +12,7 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-%struct.__va_list_tag = type { i32, i32, i8*, i8* }
+%struct.__va_list_tag = type { i32, i32, ptr, ptr }
 
 define dso_local i32 @test(i32 %a, i32 %b, i32 %c) local_unnamed_addr {
 entry:
@@ -33,29 +33,27 @@ entry:
 define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
 entry:
   %args = alloca [1 x %struct.__va_list_tag], align 16
-  %0 = bitcast [1 x %struct.__va_list_tag]* %args to i8*
-  call void @llvm.lifetime.start.p0i8(i64 24, i8* nonnull %0) #2
-  call void @llvm.va_start(i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #2
+  call void @llvm.va_start(ptr nonnull %args)
   %cmp9 = icmp sgt i32 %n, 0
   br i1 %cmp9, label %for.body.lr.ph, label %for.end
 
-; CHECK: call void @llvm.memcpy.{{.*}} [[SHADOW_COPY:%[_0-9a-z]+]], {{.*}} bitcast ({{.*}} @__msan_va_arg_tls to i8*)
-; CHECK-ORIGIN: call void @llvm.memcpy{{.*}} [[ORIGIN_COPY:%[_0-9a-z]+]], {{.*}} bitcast ({{.*}} @__msan_va_arg_origin_tls to i8*)
+; CHECK: call void @llvm.memcpy.{{.*}} [[SHADOW_COPY:%[_0-9a-z]+]], {{.*}} @__msan_va_arg_tls
+; CHECK-ORIGIN: call void @llvm.memcpy{{.*}} [[ORIGIN_COPY:%[_0-9a-z]+]], {{.*}} @__msan_va_arg_origin_tls
 
 ; CHECK: call void @llvm.va_start
 ; CHECK: call void @llvm.memcpy.{{.*}}, {{.*}} [[SHADOW_COPY]], i{{.*}} [[REGSAVE:[0-9]+]]
 ; CHECK-ORIGIN: call void @llvm.memcpy.{{.*}}, {{.*}} [[ORIGIN_COPY]], i{{.*}} [[REGSAVE]]
 
-; CHECK: [[OVERFLOW_SHADOW:%[_0-9a-z]+]] = getelementptr i8, i8* [[SHADOW_COPY]], i{{.*}} [[REGSAVE]]
+; CHECK: [[OVERFLOW_SHADOW:%[_0-9a-z]+]] = getelementptr i8, ptr [[SHADOW_COPY]], i{{.*}} [[REGSAVE]]
 ; CHECK: call void @llvm.memcpy.{{.*}}[[OVERFLOW_SHADOW]]
-; CHECK-ORIGIN: [[OVERFLOW_ORIGIN:%[_0-9a-z]+]] = getelementptr i8, i8* [[ORIGIN_COPY]], i{{.*}} [[REGSAVE]]
+; CHECK-ORIGIN: [[OVERFLOW_ORIGIN:%[_0-9a-z]+]] = getelementptr i8, ptr [[ORIGIN_COPY]], i{{.*}} [[REGSAVE]]
 ; CHECK-ORIGIN: call void @llvm.memcpy.{{.*}}[[OVERFLOW_ORIGIN]]
 
 for.body.lr.ph:                                   ; preds = %entry
-  %gp_offset_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %args, i64 0, i64 0, i32 0
-  %1 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %args, i64 0, i64 0, i32 3
-  %overflow_arg_area_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %args, i64 0, i64 0, i32 2
-  %gp_offset.pre = load i32, i32* %gp_offset_p, align 16
+  %0 = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %args, i64 0, i64 0, i32 3
+  %overflow_arg_area_p = getelementptr inbounds [1 x %struct.__va_list_tag], ptr %args, i64 0, i64 0, i32 2
+  %gp_offset.pre = load i32, ptr %args, align 16
   br label %for.body
 
 for.body:                                         ; preds = %vaarg.end, %for.body.lr.ph
@@ -66,48 +64,47 @@ for.body:                                         ; preds = %vaarg.end, %for.bod
   br i1 %fits_in_gp, label %vaarg.in_reg, label %vaarg.in_mem
 
 vaarg.in_reg:                                     ; preds = %for.body
-  %reg_save_area = load i8*, i8** %1, align 16
-  %2 = sext i32 %gp_offset to i64
-  %3 = getelementptr i8, i8* %reg_save_area, i64 %2
-  %4 = add i32 %gp_offset, 8
-  store i32 %4, i32* %gp_offset_p, align 16
+  %reg_save_area = load ptr, ptr %0, align 16
+  %1 = sext i32 %gp_offset to i64
+  %2 = getelementptr i8, ptr %reg_save_area, i64 %1
+  %3 = add i32 %gp_offset, 8
+  store i32 %3, ptr %args, align 16
   br label %vaarg.end
 
 vaarg.in_mem:                                     ; preds = %for.body
-  %overflow_arg_area = load i8*, i8** %overflow_arg_area_p, align 8
-  %overflow_arg_area.next = getelementptr i8, i8* %overflow_arg_area, i64 8
-  store i8* %overflow_arg_area.next, i8** %overflow_arg_area_p, align 8
+  %overflow_arg_area = load ptr, ptr %overflow_arg_area_p, align 8
+  %overflow_arg_area.next = getelementptr i8, ptr %overflow_arg_area, i64 8
+  store ptr %overflow_arg_area.next, ptr %overflow_arg_area_p, align 8
   br label %vaarg.end
 
 vaarg.end:                                        ; preds = %vaarg.in_mem, %vaarg.in_reg
-  %gp_offset12 = phi i32 [ %4, %vaarg.in_reg ], [ %gp_offset, %vaarg.in_mem ]
-  %vaarg.addr.in = phi i8* [ %3, %vaarg.in_reg ], [ %overflow_arg_area, %vaarg.in_mem ]
-  %vaarg.addr = bitcast i8* %vaarg.addr.in to i32*
-  %5 = load i32, i32* %vaarg.addr, align 4
-  %add = add nsw i32 %5, %sum.011
+  %gp_offset12 = phi i32 [ %3, %vaarg.in_reg ], [ %gp_offset, %vaarg.in_mem ]
+  %vaarg.addr.in = phi ptr [ %2, %vaarg.in_reg ], [ %overflow_arg_area, %vaarg.in_mem ]
+  %4 = load i32, ptr %vaarg.addr.in, align 4
+  %add = add nsw i32 %4, %sum.011
   %inc = add nuw nsw i32 %i.010, 1
   %exitcond = icmp eq i32 %inc, %n
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %vaarg.end, %entry
   %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %vaarg.end ]
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.lifetime.end.p0i8(i64 24, i8* nonnull %0) #2
+  call void @llvm.va_end(ptr nonnull %args)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #2
   ret i32 %sum.0.lcssa
 }
 
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.va_start(i8*) #2
+declare void @llvm.va_start(ptr) #2
 
 ; Function Attrs: nounwind
-declare void @llvm.va_end(i8*) #2
+declare void @llvm.va_end(ptr) #2
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 declare dso_local i80 @sum_i80(i32, ...) local_unnamed_addr
 

diff --git a/llvm/test/Instrumentation/MemorySanitizer/alloca.ll b/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
index d0657698e73e8..25a44ecd9d241 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
@@ -20,11 +20,11 @@ entry:
 }
 
 ; CHECK-LABEL: define void @static(
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 4, i8* {{.*}} [[IDPTR]] {{.*}}, i8* {{.*}} [[DESCR]],
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 4, i8* {{.*}} [[IDPTR]]
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 4,
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 4, i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 4)
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr %unique_x, i64 4, ptr [[IDPTR]], ptr [[DESCR]])
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr %unique_x, i64 4, ptr [[IDPTR]])
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 4,
 ; CHECK: ret void
 
 
@@ -37,11 +37,11 @@ l:
 }
 
 ; CHECK-LABEL: define void @dynamic(
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 4,
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 4,
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 4,
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 4, i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 4)
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 4,
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 4,
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 4,
 ; CHECK: ret void
 
 define void @array() sanitize_memory {
@@ -51,11 +51,11 @@ entry:
 }
 
 ; CHECK-LABEL: define void @array(
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 20, i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 20)
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 20,
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 20,
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 20,
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 20, i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 20)
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 20,
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 20,
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 20,
 ; CHECK: ret void
 
 define void @array32() sanitize_memory {
@@ -65,11 +65,11 @@ entry:
 }
 
 ; CHECK-LABEL: define void @array32(
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 20, i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 20)
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 20,
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 20,
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 20,
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 20, i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 20)
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 20,
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 20,
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 20,
 ; CHECK: ret void
 
 define void @array_non_const(i64 %cnt) sanitize_memory {
@@ -80,11 +80,11 @@ entry:
 
 ; CHECK-LABEL: define void @array_non_const(
 ; CHECK: %[[A:.*]] = mul i64 4, %cnt
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 %[[A]], i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 %[[A]])
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 %[[A]],
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 %[[A]],
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 %[[A]],
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 %[[A]], i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 %[[A]])
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 %[[A]],
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 %[[A]],
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 %[[A]],
 ; CHECK: ret void
 
 define void @array_non_const32(i32 %cnt) sanitize_memory {
@@ -96,11 +96,11 @@ entry:
 ; CHECK-LABEL: define void @array_non_const32(
 ; CHECK: %[[Z:.*]] = zext i32 %cnt to i64
 ; CHECK: %[[A:.*]] = mul i64 4, %[[Z]]
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 %[[A]], i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 %[[A]])
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 %[[A]],
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 %[[A]],
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 %[[A]],
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 %[[A]], i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 %[[A]])
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 %[[A]],
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 %[[A]],
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 %[[A]],
 ; CHECK: ret void
 
 ; Check that the local is unpoisoned in the absence of sanitize_memory
@@ -111,27 +111,26 @@ entry:
 }
 
 ; CHECK-LABEL: define void @unpoison_local(
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 0, i64 20, i1 false)
-; CALL: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 0, i64 20, i1 false)
-; ORIGIN-NOT: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 20,
-; ORIGIN-LEAN-NOT: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 20,
-; KMSAN: call void @__msan_unpoison_alloca(i8* {{.*}}, i64 20)
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 0, i64 20, i1 false)
+; CALL: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 0, i64 20, i1 false)
+; ORIGIN-NOT: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 20,
+; ORIGIN-LEAN-NOT: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 20,
+; KMSAN: call void @__msan_unpoison_alloca(ptr {{.*}}, i64 20)
 ; CHECK: ret void
 
 ; Check that every llvm.lifetime.start() causes poisoning of locals.
 define void @lifetime_start() sanitize_memory {
 entry:
   %x = alloca i32, align 4
-  %c = bitcast i32* %x to i8*
   br label %another_bb
 
 another_bb:
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %c)
-  store i32 7, i32* %x
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %c)
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %c)
-  store i32 8, i32* %x
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %c)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+  store i32 7, ptr %x
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+  store i32 8, ptr %x
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
   ret void
 }
 
@@ -141,27 +140,26 @@ another_bb:
 ; CHECK-LABEL: another_bb:
 
 ; CHECK: call void @llvm.lifetime.start
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 4,
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 4,
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 4,
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 4, i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 4)
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 4,
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 4,
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 4,
 
 ; CHECK: call void @llvm.lifetime.start
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 4,
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 4,
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 4,
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 4, i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 4)
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 4,
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 4,
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 4,
 ; CHECK: ret void
 
 ; Make sure variable-length arrays are handled correctly.
 define void @lifetime_start_var(i64 %cnt) sanitize_memory {
 entry:
   %x = alloca i32, i64 %cnt, align 4
-  %c = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 -1, i8* nonnull %c)
-  call void @llvm.lifetime.end.p0i8(i64 -1, i8* nonnull %c)
+  call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %x)
+  call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %x)
   ret void
 }
 
@@ -170,11 +168,11 @@ entry:
 ; CHECK: %x = alloca i32, i64 %cnt
 ; CHECK: call void @llvm.lifetime.start
 ; CHECK: %[[A:.*]] = mul i64 4, %cnt
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 %[[A]], i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 %[[A]])
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 %[[A]],
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 %[[A]],
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 %[[A]],
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 %[[A]], i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 %[[A]])
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 %[[A]],
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 %[[A]],
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 %[[A]],
 ; CHECK: call void @llvm.lifetime.end
 ; CHECK: ret void
 
@@ -187,72 +185,69 @@ entry:
   %x = alloca i32, align 4
   %y = alloca i32, align 4
   %z = alloca i32, align 4
-  %cx = bitcast i32* %x to i8*
-  %cy = bitcast i32* %y to i8*
-  %cz = bitcast i32* %z to i8*
   %tobool = icmp eq i8 %v, 0
-  %xy = select i1 %tobool, i32* %x, i32* %y
-  %cxcy = select i1 %tobool, i8* %cx, i8* %cy
+  %xy = select i1 %tobool, ptr %x, ptr %y
+  %cxcy = select i1 %tobool, ptr %x, ptr %y
   br label %another_bb
 
 another_bb:
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %cz)
-  store i32 7, i32* %z
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %cz)
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %cz)
-  store i32 7, i32* %z
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %cz)
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %cxcy)
-  store i32 8, i32* %xy
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %cxcy)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %z)
+  store i32 7, ptr %z
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %z)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %z)
+  store i32 7, ptr %z
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %z)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cxcy)
+  store i32 8, ptr %xy
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cxcy)
   ret void
 }
 
 ; CHECK-LABEL: define void @lifetime_no_alloca(
 ; CHECK-LABEL: entry:
 ; CHECK: %x = alloca i32
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 4,
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 4,
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 4,
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 4, i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 4)
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 4,
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 4,
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 4,
 ; CHECK: %y = alloca i32
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 4,
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 4,
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 4,
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 4, i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 4)
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 4,
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 4,
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 4,
 ; CHECK: %z = alloca i32
-; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false)
-; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
-; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 4,
-; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 4,
-; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 4,
+; INLINE: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 4, i1 false)
+; CALL: call void @__msan_poison_stack(ptr {{.*}}, i64 4)
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 4,
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 4,
+; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 4,
 
 ; There are two lifetime intrinsics for %z, but we must instrument it only once.
-; INLINE-NOT: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false)
-; CALL-NOT: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
-; ORIGIN-NOT: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 4,
-; ORIGIN-LEAN-NOT: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 4,
-; KMSAN-NOT: call void @__msan_poison_alloca(i8* {{.*}}, i64 4,
+; INLINE-NOT: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 4, i1 false)
+; CALL-NOT: call void @__msan_poison_stack(ptr {{.*}}, i64 4)
+; ORIGIN-NOT: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 4,
+; ORIGIN-LEAN-NOT: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 4,
+; KMSAN-NOT: call void @__msan_poison_alloca(ptr {{.*}}, i64 4,
 ; CHECK-LABEL: another_bb:
 
 ; CHECK: call void @llvm.lifetime.start
-; INLINE-NOT: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false)
-; CALL-NOT: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
-; ORIGIN-NOT: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 4,
-; ORIGIN-LEAN-NOT: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 4,
-; KMSAN-NOT: call void @__msan_poison_alloca(i8* {{.*}}, i64 4,
+; INLINE-NOT: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 4, i1 false)
+; CALL-NOT: call void @__msan_poison_stack(ptr {{.*}}, i64 4)
+; ORIGIN-NOT: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 4,
+; ORIGIN-LEAN-NOT: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 4,
+; KMSAN-NOT: call void @__msan_poison_alloca(ptr {{.*}}, i64 4,
 ; CHECK: call void @llvm.lifetime.end
 ; CHECK: call void @llvm.lifetime.start
-; INLINE-NOT: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 4, i1 false)
-; CALL-NOT: call void @__msan_poison_stack(i8* {{.*}}, i64 4)
-; ORIGIN-NOT: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 4,
-; ORIGIN-LEAN-NOT: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 4,
-; KMSAN-NOT: call void @__msan_poison_alloca(i8* {{.*}}, i64 4,
+; INLINE-NOT: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 -1, i64 4, i1 false)
+; CALL-NOT: call void @__msan_poison_stack(ptr {{.*}}, i64 4)
+; ORIGIN-NOT: call void @__msan_set_alloca_origin_with_descr(ptr {{.*}}, i64 4,
+; ORIGIN-LEAN-NOT: call void @__msan_set_alloca_origin_no_descr(ptr {{.*}}, i64 4,
+; KMSAN-NOT: call void @__msan_poison_alloca(ptr {{.*}}, i64 4,
 ; CHECK: call void @llvm.lifetime.end
 
 
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)

diff --git a/llvm/test/Instrumentation/MemorySanitizer/atomics.ll b/llvm/test/Instrumentation/MemorySanitizer/atomics.ll
index a38b49e1fdea5..0d52064b0a334 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/atomics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/atomics.ll
@@ -7,9 +7,9 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; atomicrmw xchg: store clean shadow, return clean shadow
 
-define i32 @AtomicRmwXchg(i32* %p, i32 %x) sanitize_memory {
+define i32 @AtomicRmwXchg(ptr %p, i32 %x) sanitize_memory {
 entry:
-  %0 = atomicrmw xchg i32* %p, i32 %x seq_cst
+  %0 = atomicrmw xchg ptr %p, i32 %x seq_cst
   ret i32 %0
 }
 
@@ -21,24 +21,24 @@ entry:
 
 ; atomicrmw xchg ptr: exactly the same as above
 
-define i32* @AtomicRmwXchgPtr(i32** %p, i32* %x) sanitize_memory {
+define ptr @AtomicRmwXchgPtr(ptr %p, ptr %x) sanitize_memory {
 entry:
-  %0 = atomicrmw xchg i32** %p, i32* %x seq_cst
-  ret i32* %0
+  %0 = atomicrmw xchg ptr %p, ptr %x seq_cst
+  ret ptr %0
 }
 
 ; CHECK-LABEL: @AtomicRmwXchgPtr
 ; CHECK: store i64 0,
 ; CHECK: atomicrmw xchg {{.*}} seq_cst
 ; CHECK: store i64 0, {{.*}} @__msan_retval_tls
-; CHECK: ret i32*
+; CHECK: ret ptr
 
 
 ; atomicrmw max: exactly the same as above
 
-define i32 @AtomicRmwMax(i32* %p, i32 %x) sanitize_memory {
+define i32 @AtomicRmwMax(ptr %p, i32 %x) sanitize_memory {
 entry:
-  %0 = atomicrmw max i32* %p, i32 %x seq_cst
+  %0 = atomicrmw max ptr %p, i32 %x seq_cst
   ret i32 %0
 }
 
@@ -51,9 +51,9 @@ entry:
 
 ; cmpxchg: the same as above, but also check %a shadow
 
-define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
+define i32 @Cmpxchg(ptr %p, i32 %a, i32 %b) sanitize_memory {
 entry:
-  %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i32 %a, i32 %b seq_cst seq_cst
   %0 = extractvalue { i32, i1 } %pair, 0
   ret i32 %0
 }
@@ -71,9 +71,9 @@ entry:
 
 ; relaxed cmpxchg: bump up to "release monotonic"
 
-define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
+define i32 @CmpxchgMonotonic(ptr %p, i32 %a, i32 %b) sanitize_memory {
 entry:
-  %pair = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic
+  %pair = cmpxchg ptr %p, i32 %a, i32 %b monotonic monotonic
   %0 = extractvalue { i32, i1 } %pair, 0
   ret i32 %0
 }
@@ -91,119 +91,119 @@ entry:
 
 ; atomic load: preserve alignment, load shadow value after app value
 
-define i32 @AtomicLoad(i32* %p) sanitize_memory {
+define i32 @AtomicLoad(ptr %p) sanitize_memory {
 entry:
-  %0 = load atomic i32, i32* %p seq_cst, align 16
+  %0 = load atomic i32, ptr %p seq_cst, align 16
   ret i32 %0
 }
 
 ; CHECK-LABEL: @AtomicLoad
-; CHECK: load atomic i32, i32* {{.*}} seq_cst, align 16
-; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
+; CHECK: load atomic i32, ptr {{.*}} seq_cst, align 16
+; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, ptr {{.*}}, align 16
 ; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
 ; CHECK: ret i32
 
 
 ; atomic load: preserve alignment, load shadow value after app value
 
-define i32 @AtomicLoadAcquire(i32* %p) sanitize_memory {
+define i32 @AtomicLoadAcquire(ptr %p) sanitize_memory {
 entry:
-  %0 = load atomic i32, i32* %p acquire, align 16
+  %0 = load atomic i32, ptr %p acquire, align 16
   ret i32 %0
 }
 
 ; CHECK-LABEL: @AtomicLoadAcquire
-; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
-; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
+; CHECK: load atomic i32, ptr {{.*}} acquire, align 16
+; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, ptr {{.*}}, align 16
 ; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
 ; CHECK: ret i32
 
 
 ; atomic load monotonic: bump up to load acquire
 
-define i32 @AtomicLoadMonotonic(i32* %p) sanitize_memory {
+define i32 @AtomicLoadMonotonic(ptr %p) sanitize_memory {
 entry:
-  %0 = load atomic i32, i32* %p monotonic, align 16
+  %0 = load atomic i32, ptr %p monotonic, align 16
   ret i32 %0
 }
 
 ; CHECK-LABEL: @AtomicLoadMonotonic
-; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
-; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
+; CHECK: load atomic i32, ptr {{.*}} acquire, align 16
+; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, ptr {{.*}}, align 16
 ; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
 ; CHECK: ret i32
 
 
 ; atomic load unordered: bump up to load acquire
 
-define i32 @AtomicLoadUnordered(i32* %p) sanitize_memory {
+define i32 @AtomicLoadUnordered(ptr %p) sanitize_memory {
 entry:
-  %0 = load atomic i32, i32* %p unordered, align 16
+  %0 = load atomic i32, ptr %p unordered, align 16
   ret i32 %0
 }
 
 ; CHECK-LABEL: @AtomicLoadUnordered
-; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
-; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
+; CHECK: load atomic i32, ptr {{.*}} acquire, align 16
+; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, ptr {{.*}}, align 16
 ; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
 ; CHECK: ret i32
 
 
 ; atomic store: preserve alignment, store clean shadow value before app value
 
-define void @AtomicStore(i32* %p, i32 %x) sanitize_memory {
+define void @AtomicStore(ptr %p, i32 %x) sanitize_memory {
 entry:
-  store atomic i32 %x, i32* %p seq_cst, align 16
+  store atomic i32 %x, ptr %p seq_cst, align 16
   ret void
 }
 
 ; CHECK-LABEL: @AtomicStore
 ; CHECK-NOT: @__msan_param_tls
-; CHECK: store i32 0, i32* {{.*}}, align 16
-; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16
+; CHECK: store i32 0, ptr {{.*}}, align 16
+; CHECK: store atomic i32 %x, ptr %p seq_cst, align 16
 ; CHECK: ret void
 
 
 ; atomic store: preserve alignment, store clean shadow value before app value
 
-define void @AtomicStoreRelease(i32* %p, i32 %x) sanitize_memory {
+define void @AtomicStoreRelease(ptr %p, i32 %x) sanitize_memory {
 entry:
-  store atomic i32 %x, i32* %p release, align 16
+  store atomic i32 %x, ptr %p release, align 16
   ret void
 }
 
 ; CHECK-LABEL: @AtomicStoreRelease
 ; CHECK-NOT: @__msan_param_tls
-; CHECK: store i32 0, i32* {{.*}}, align 16
-; CHECK: store atomic i32 %x, i32* %p release, align 16
+; CHECK: store i32 0, ptr {{.*}}, align 16
+; CHECK: store atomic i32 %x, ptr %p release, align 16
 ; CHECK: ret void
 
 
 ; atomic store monotonic: bumped up to store release
 
-define void @AtomicStoreMonotonic(i32* %p, i32 %x) sanitize_memory {
+define void @AtomicStoreMonotonic(ptr %p, i32 %x) sanitize_memory {
 entry:
-  store atomic i32 %x, i32* %p monotonic, align 16
+  store atomic i32 %x, ptr %p monotonic, align 16
   ret void
 }
 
 ; CHECK-LABEL: @AtomicStoreMonotonic
 ; CHECK-NOT: @__msan_param_tls
-; CHECK: store i32 0, i32* {{.*}}, align 16
-; CHECK: store atomic i32 %x, i32* %p release, align 16
+; CHECK: store i32 0, ptr {{.*}}, align 16
+; CHECK: store atomic i32 %x, ptr %p release, align 16
 ; CHECK: ret void
 
 
 ; atomic store unordered: bumped up to store release
 
-define void @AtomicStoreUnordered(i32* %p, i32 %x) sanitize_memory {
+define void @AtomicStoreUnordered(ptr %p, i32 %x) sanitize_memory {
 entry:
-  store atomic i32 %x, i32* %p unordered, align 16
+  store atomic i32 %x, ptr %p unordered, align 16
   ret void
 }
 
 ; CHECK-LABEL: @AtomicStoreUnordered
 ; CHECK-NOT: @__msan_param_tls
-; CHECK: store i32 0, i32* {{.*}}, align 16
-; CHECK: store atomic i32 %x, i32* %p release, align 16
+; CHECK: store i32 0, ptr {{.*}}, align 16
+; CHECK: store atomic i32 %x, ptr %p release, align 16
 ; CHECK: ret void
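
The ordering upgrades checked above all serve one invariant: the shadow load must
not be reordered before the application load, and the shadow store must not be
reordered after the application store, which is why monotonic and unordered
accesses get bumped to acquire and release respectively. A sketch of the
instrumented shape for a monotonic load, with the shadow address passed in as a
hypothetical %shadow argument instead of being computed through the real shadow
mapping:

define i32 @monotonic_load_sketch(ptr %p, ptr %shadow) sanitize_memory {
entry:
  ; monotonic bumped up to acquire so the plain shadow load below is
  ; ordered after the application load
  %v = load atomic i32, ptr %p acquire, align 16
  %s = load i32, ptr %shadow, align 16
  ret i32 %v
}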

diff --git a/llvm/test/Instrumentation/MemorySanitizer/attributes.ll b/llvm/test/Instrumentation/MemorySanitizer/attributes.ll
index c2825ab3fc630..c0cfc7bf2da92 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/attributes.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/attributes.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
 declare void @a_() sanitize_memory readnone
 declare void @b_() sanitize_memory readonly
 declare void @c_() sanitize_memory writeonly
-declare void @d_(i32* %p) sanitize_memory writeonly argmemonly
+declare void @d_(ptr %p) sanitize_memory writeonly argmemonly
 declare void @e_() sanitize_memory speculatable
 
 define void @a() sanitize_memory readnone {
@@ -31,10 +31,10 @@ entry:
   ret void
 }
 
-define void @d(i32* %p) sanitize_memory writeonly argmemonly {
+define void @d(ptr %p) sanitize_memory writeonly argmemonly {
 entry:
-  call void @d_(i32* %p)
-  call void @d_(i32* %p) writeonly argmemonly
+  call void @d_(ptr %p)
+  call void @d_(ptr %p) writeonly argmemonly
   ret void
 }
 

diff --git a/llvm/test/Instrumentation/MemorySanitizer/avx-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/avx-intrinsics-x86.ll
index ae2ae61d4cf28..9d3300207aafa 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/avx-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/avx-intrinsics-x86.ll
@@ -6,12 +6,12 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define <4 x double> @test_x86_avx_addsub_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_addsub_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -22,12 +22,12 @@ declare <4 x double> @llvm.x86.avx.addsub.pd.256(<4 x double>, <4 x double>) nou
 
 define <8 x float> @test_x86_avx_addsub_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_addsub_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -38,14 +38,14 @@ declare <8 x float> @llvm.x86.avx.addsub.ps.256(<8 x float>, <8 x float>) nounwi
 
 define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) #0 {
 ; CHECK-LABEL: @test_x86_avx_blendv_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 64) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <4 x i64> [[_MSPROP]], [[TMP3]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], <4 x double> [[A2:%.*]])
-; CHECK-NEXT:    store <4 x i64> [[_MSPROP1]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) ; <<4 x double>> [#uses=1]
@@ -56,14 +56,14 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4
 
 define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) #0 {
 ; CHECK-LABEL: @test_x86_avx_blendv_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 64) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], <8 x float> [[A2:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP1]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) ; <<8 x float>> [#uses=1]
@@ -74,14 +74,14 @@ declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x f
 
 define <4 x double> @test_x86_avx_cmp_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_cmp_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = sext <4 x i1> [[TMP4]] to <4 x i64>
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double> %a0, <4 x double> %a1, i8 7) ; <<4 x double>> [#uses=1]
@@ -92,14 +92,14 @@ declare <4 x double> @llvm.x86.avx.cmp.pd.256(<4 x double>, <4 x double>, i8) no
 
 define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_cmp_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = sext <8 x i1> [[TMP4]] to <8 x i32>
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <8 x i32> [[TMP5]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP5]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) ; <<8 x float>> [#uses=1]
@@ -108,8 +108,8 @@ define <8 x float> @test_x86_avx_cmp_ps_256(<8 x float> %a0, <8 x float> %a1) #0
 
 define <8 x float> @test_x86_avx_cmp_ps_256_pseudo_op(<8 x float> %a0, <8 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_cmp_ps_256_pseudo_op(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
@@ -239,7 +239,7 @@ define <8 x float> @test_x86_avx_cmp_ps_256_pseudo_op(<8 x float> %a0, <8 x floa
 ; CHECK-NEXT:    [[TMP97:%.*]] = icmp ne <8 x i32> [[TMP96]], zeroinitializer
 ; CHECK-NEXT:    [[TMP98:%.*]] = sext <8 x i1> [[TMP97]] to <8 x i32>
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> [[A0]], <8 x float> [[A32]], i8 31)
-; CHECK-NEXT:    store <8 x i32> [[TMP98]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP98]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %a2 = call <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 0) ; <<8 x float>> [#uses=1]
@@ -281,7 +281,7 @@ declare <8 x float> @llvm.x86.avx.cmp.ps.256(<8 x float>, <8 x float>, i8) nounw
 
 define <4 x float> @test_x86_avx_cvt_pd2_ps_256(<4 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_cvt_pd2_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
@@ -291,7 +291,7 @@ define <4 x float> @test_x86_avx_cvt_pd2_ps_256(<4 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double> %a0) ; <<4 x float>> [#uses=1]
@@ -302,7 +302,7 @@ declare <4 x float> @llvm.x86.avx.cvt.pd2.ps.256(<4 x double>) nounwind readnone
 
 define <4 x i32> @test_x86_avx_cvt_pd2dq_256(<4 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_cvt_pd2dq_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
@@ -312,7 +312,7 @@ define <4 x i32> @test_x86_avx_cvt_pd2dq_256(<4 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double> %a0) ; <<4 x i32>> [#uses=1]
@@ -323,7 +323,7 @@ declare <4 x i32> @llvm.x86.avx.cvt.pd2dq.256(<4 x double>) nounwind readnone
 
 define <8 x i32> @test_x86_avx_cvt_ps2dq_256(<8 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_cvt_ps2dq_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
@@ -333,7 +333,7 @@ define <8 x i32> @test_x86_avx_cvt_ps2dq_256(<8 x float> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> [[A0:%.*]])
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float> %a0) ; <<8 x i32>> [#uses=1]
@@ -344,7 +344,7 @@ declare <8 x i32> @llvm.x86.avx.cvt.ps2dq.256(<8 x float>) nounwind readnone
 
 define <4 x i32> @test_x86_avx_cvtt_pd2dq_256(<4 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_cvtt_pd2dq_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
@@ -354,7 +354,7 @@ define <4 x i32> @test_x86_avx_cvtt_pd2dq_256(<4 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double> %a0) ; <<4 x i32>> [#uses=1]
@@ -365,7 +365,7 @@ declare <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double>) nounwind readnone
 
 define <8 x i32> @test_x86_avx_cvtt_ps2dq_256(<8 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_cvtt_ps2dq_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
@@ -375,7 +375,7 @@ define <8 x i32> @test_x86_avx_cvtt_ps2dq_256(<8 x float> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> [[A0:%.*]])
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> %a0) ; <<8 x i32>> [#uses=1]
@@ -386,8 +386,8 @@ declare <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float>) nounwind readnone
 
 define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_dp_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -400,7 +400,7 @@ define <8 x float> @test_x86_avx_dp_ps_256(<8 x float> %a0, <8 x float> %a1) #0
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float> %a0, <8 x float> %a1, i8 7) ; <<8 x float>> [#uses=1]
@@ -411,12 +411,12 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwi
 
 define <4 x double> @test_x86_avx_hadd_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_hadd_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -427,12 +427,12 @@ declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>) nounw
 
 define <8 x float> @test_x86_avx_hadd_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_hadd_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -443,12 +443,12 @@ declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>) nounwind
 
 define <4 x double> @test_x86_avx_hsub_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_hsub_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -459,12 +459,12 @@ declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>) nounw
 
 define <8 x float> @test_x86_avx_hsub_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_hsub_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -473,34 +473,34 @@ define <8 x float> @test_x86_avx_hsub_ps_256(<8 x float> %a0, <8 x float> %a1) #
 declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>) nounwind readnone
 
 
-define <32 x i8> @test_x86_avx_ldu_dq_256(i8* %a0) #0 {
+define <32 x i8> @test_x86_avx_ldu_dq_256(ptr %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_ldu_dq_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint i8* [[A0:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to <32 x i8>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <32 x i8>, <32 x i8>* [[TMP4]], align 1
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP4]], align 1
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
 ; CHECK:       5:
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
-; CHECK-NEXT:    [[RES:%.*]] = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* [[A0]])
-; CHECK-NEXT:    store <32 x i8> [[_MSLD]], <32 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <32 x i8>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <32 x i8> @llvm.x86.avx.ldu.dq.256(ptr [[A0]])
+; CHECK-NEXT:    store <32 x i8> [[_MSLD]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <32 x i8> [[RES]]
 ;
-  %res = call <32 x i8> @llvm.x86.avx.ldu.dq.256(i8* %a0) ; <<32 x i8>> [#uses=1]
+  %res = call <32 x i8> @llvm.x86.avx.ldu.dq.256(ptr %a0) ; <<32 x i8>> [#uses=1]
   ret <32 x i8> %res
 }
-declare <32 x i8> @llvm.x86.avx.ldu.dq.256(i8*) nounwind readonly
+declare <32 x i8> @llvm.x86.avx.ldu.dq.256(ptr) nounwind readonly
 
 
-define <2 x double> @test_x86_avx_maskload_pd(i8* %a0, <2 x i64> %mask) #0 {
+define <2 x double> @test_x86_avx_maskload_pd(ptr %a0, <2 x i64> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx_maskload_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -511,20 +511,20 @@ define <2 x double> @test_x86_avx_maskload_pd(i8* %a0, <2 x i64> %mask) #0 {
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       5:
-; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.avx.maskload.pd(i8* [[A0:%.*]], <2 x i64> [[MASK:%.*]])
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.avx.maskload.pd(ptr [[A0:%.*]], <2 x i64> [[MASK:%.*]])
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
-  %res = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %mask) ; <<2 x double>> [#uses=1]
+  %res = call <2 x double> @llvm.x86.avx.maskload.pd(ptr %a0, <2 x i64> %mask) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
-declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>) nounwind readonly
+declare <2 x double> @llvm.x86.avx.maskload.pd(ptr, <2 x i64>) nounwind readonly
 
 
-define <4 x double> @test_x86_avx_maskload_pd_256(i8* %a0, <4 x i64> %mask) #0 {
+define <4 x double> @test_x86_avx_maskload_pd_256(ptr %a0, <4 x i64> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx_maskload_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -535,20 +535,20 @@ define <4 x double> @test_x86_avx_maskload_pd_256(i8* %a0, <4 x i64> %mask) #0 {
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       5:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* [[A0:%.*]], <4 x i64> [[MASK:%.*]])
-; CHECK-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.maskload.pd.256(ptr [[A0:%.*]], <4 x i64> [[MASK:%.*]])
+; CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
-  %res = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %mask) ; <<4 x double>> [#uses=1]
+  %res = call <4 x double> @llvm.x86.avx.maskload.pd.256(ptr %a0, <4 x i64> %mask) ; <<4 x double>> [#uses=1]
   ret <4 x double> %res
 }
-declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>) nounwind readonly
+declare <4 x double> @llvm.x86.avx.maskload.pd.256(ptr, <4 x i64>) nounwind readonly
 
 
-define <4 x float> @test_x86_avx_maskload_ps(i8* %a0, <4 x i32> %mask) #0 {
+define <4 x float> @test_x86_avx_maskload_ps(ptr %a0, <4 x i32> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx_maskload_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -559,20 +559,20 @@ define <4 x float> @test_x86_avx_maskload_ps(i8* %a0, <4 x i32> %mask) #0 {
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       5:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.avx.maskload.ps(i8* [[A0:%.*]], <4 x i32> [[MASK:%.*]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.avx.maskload.ps(ptr [[A0:%.*]], <4 x i32> [[MASK:%.*]])
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
-  %res = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %mask) ; <<4 x float>> [#uses=1]
+  %res = call <4 x float> @llvm.x86.avx.maskload.ps(ptr %a0, <4 x i32> %mask) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
-declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x i32>) nounwind readonly
+declare <4 x float> @llvm.x86.avx.maskload.ps(ptr, <4 x i32>) nounwind readonly
 
 
-define <8 x float> @test_x86_avx_maskload_ps_256(i8* %a0, <8 x i32> %mask) #0 {
+define <8 x float> @test_x86_avx_maskload_ps_256(ptr %a0, <8 x i32> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx_maskload_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -583,21 +583,21 @@ define <8 x float> @test_x86_avx_maskload_ps_256(i8* %a0, <8 x i32> %mask) #0 {
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       5:
-; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* [[A0:%.*]], <8 x i32> [[MASK:%.*]])
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.maskload.ps.256(ptr [[A0:%.*]], <8 x i32> [[MASK:%.*]])
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
-  %res = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %mask) ; <<8 x float>> [#uses=1]
+  %res = call <8 x float> @llvm.x86.avx.maskload.ps.256(ptr %a0, <8 x i32> %mask) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }
-declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>) nounwind readonly
+declare <8 x float> @llvm.x86.avx.maskload.ps.256(ptr, <8 x i32>) nounwind readonly
 
 
-define void @test_x86_avx_maskstore_pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2) #0 {
+define void @test_x86_avx_maskstore_pd(ptr %a0, <2 x i64> %mask, <2 x double> %a2) #0 {
 ; CHECK-LABEL: @test_x86_avx_maskstore_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -611,20 +611,20 @@ define void @test_x86_avx_maskstore_pd(i8* %a0, <2 x i64> %mask, <2 x double> %a
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       7:
-; CHECK-NEXT:    call void @llvm.x86.avx.maskstore.pd(i8* [[A0:%.*]], <2 x i64> [[MASK:%.*]], <2 x double> [[A2:%.*]])
+; CHECK-NEXT:    call void @llvm.x86.avx.maskstore.pd(ptr [[A0:%.*]], <2 x i64> [[MASK:%.*]], <2 x double> [[A2:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2)
+  call void @llvm.x86.avx.maskstore.pd(ptr %a0, <2 x i64> %mask, <2 x double> %a2)
   ret void
 }
-declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>) nounwind
+declare void @llvm.x86.avx.maskstore.pd(ptr, <2 x i64>, <2 x double>) nounwind
 
 
-define void @test_x86_avx_maskstore_pd_256(i8* %a0, <4 x i64> %mask, <4 x double> %a2) #0 {
+define void @test_x86_avx_maskstore_pd_256(ptr %a0, <4 x i64> %mask, <4 x double> %a2) #0 {
 ; CHECK-LABEL: @test_x86_avx_maskstore_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -638,20 +638,20 @@ define void @test_x86_avx_maskstore_pd_256(i8* %a0, <4 x i64> %mask, <4 x double
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       7:
-; CHECK-NEXT:    call void @llvm.x86.avx.maskstore.pd.256(i8* [[A0:%.*]], <4 x i64> [[MASK:%.*]], <4 x double> [[A2:%.*]])
+; CHECK-NEXT:    call void @llvm.x86.avx.maskstore.pd.256(ptr [[A0:%.*]], <4 x i64> [[MASK:%.*]], <4 x double> [[A2:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %mask, <4 x double> %a2)
+  call void @llvm.x86.avx.maskstore.pd.256(ptr %a0, <4 x i64> %mask, <4 x double> %a2)
   ret void
 }
-declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>) nounwind
+declare void @llvm.x86.avx.maskstore.pd.256(ptr, <4 x i64>, <4 x double>) nounwind
 
 
-define void @test_x86_avx_maskstore_ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2) #0 {
+define void @test_x86_avx_maskstore_ps(ptr %a0, <4 x i32> %mask, <4 x float> %a2) #0 {
 ; CHECK-LABEL: @test_x86_avx_maskstore_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -665,20 +665,20 @@ define void @test_x86_avx_maskstore_ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       7:
-; CHECK-NEXT:    call void @llvm.x86.avx.maskstore.ps(i8* [[A0:%.*]], <4 x i32> [[MASK:%.*]], <4 x float> [[A2:%.*]])
+; CHECK-NEXT:    call void @llvm.x86.avx.maskstore.ps(ptr [[A0:%.*]], <4 x i32> [[MASK:%.*]], <4 x float> [[A2:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2)
+  call void @llvm.x86.avx.maskstore.ps(ptr %a0, <4 x i32> %mask, <4 x float> %a2)
   ret void
 }
-declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>) nounwind
+declare void @llvm.x86.avx.maskstore.ps(ptr, <4 x i32>, <4 x float>) nounwind
 
 
-define void @test_x86_avx_maskstore_ps_256(i8* %a0, <8 x i32> %mask, <8 x float> %a2) #0 {
+define void @test_x86_avx_maskstore_ps_256(ptr %a0, <8 x i32> %mask, <8 x float> %a2) #0 {
 ; CHECK-LABEL: @test_x86_avx_maskstore_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -692,23 +692,23 @@ define void @test_x86_avx_maskstore_ps_256(i8* %a0, <8 x i32> %mask, <8 x float>
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       7:
-; CHECK-NEXT:    call void @llvm.x86.avx.maskstore.ps.256(i8* [[A0:%.*]], <8 x i32> [[MASK:%.*]], <8 x float> [[A2:%.*]])
+; CHECK-NEXT:    call void @llvm.x86.avx.maskstore.ps.256(ptr [[A0:%.*]], <8 x i32> [[MASK:%.*]], <8 x float> [[A2:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %mask, <8 x float> %a2)
+  call void @llvm.x86.avx.maskstore.ps.256(ptr %a0, <8 x i32> %mask, <8 x float> %a2)
   ret void
 }
-declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwind
+declare void @llvm.x86.avx.maskstore.ps.256(ptr, <8 x i32>, <8 x float>) nounwind
 
 
 define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_max_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx.max.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -719,12 +719,12 @@ declare <4 x double> @llvm.x86.avx.max.pd.256(<4 x double>, <4 x double>) nounwi
 
 define <8 x float> @test_x86_avx_max_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_max_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.max.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -735,12 +735,12 @@ declare <8 x float> @llvm.x86.avx.max.ps.256(<8 x float>, <8 x float>) nounwind
 
 define <4 x double> @test_x86_avx_min_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_min_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx.min.pd.256(<4 x double> %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
@@ -751,12 +751,12 @@ declare <4 x double> @llvm.x86.avx.min.pd.256(<4 x double>, <4 x double>) nounwi
 
 define <8 x float> @test_x86_avx_min_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_min_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.min.ps.256(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
@@ -767,7 +767,7 @@ declare <8 x float> @llvm.x86.avx.min.ps.256(<8 x float>, <8 x float>) nounwind
 
 define i32 @test_x86_avx_movmsk_pd_256(<4 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_movmsk_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
@@ -777,7 +777,7 @@ define i32 @test_x86_avx_movmsk_pd_256(<4 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> [[A0:%.*]])
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %a0) ; <i32> [#uses=1]
@@ -788,7 +788,7 @@ declare i32 @llvm.x86.avx.movmsk.pd.256(<4 x double>) nounwind readnone
 
 define i32 @test_x86_avx_movmsk_ps_256(<8 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_movmsk_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
@@ -798,7 +798,7 @@ define i32 @test_x86_avx_movmsk_ps_256(<8 x float> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> [[A0:%.*]])
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %a0) ; <i32> [#uses=1]
@@ -809,15 +809,15 @@ declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
 
 define i32 @test_x86_avx_ptestc_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_ptestc_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i1> [[TMP4]] to i4
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i4 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> [[A0:%.*]], <4 x i64> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %a0, <4 x i64> %a1) ; <i32> [#uses=1]
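
The ptest and vtest tests below all share one propagation pattern: the argument shadows are ORed, reduced to one bit per lane, and the packed lane bits become the low bits of the i32 result shadow. A sketch with illustrative names:

  %or = or <4 x i64> %shadow.a0, %shadow.a1
  %ne = icmp ne <4 x i64> %or, zeroinitializer  ; one bit per poisoned lane
  %bits = bitcast <4 x i1> %ne to i4
  %sret = zext i4 %bits to i32                  ; stored to @__msan_retval_tls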
@@ -828,15 +828,15 @@ declare i32 @llvm.x86.avx.ptestc.256(<4 x i64>, <4 x i64>) nounwind readnone
 
 define i32 @test_x86_avx_ptestnzc_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_ptestnzc_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i1> [[TMP4]] to i4
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i4 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.ptestnzc.256(<4 x i64> [[A0:%.*]], <4 x i64> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.ptestnzc.256(<4 x i64> %a0, <4 x i64> %a1) ; <i32> [#uses=1]
@@ -847,15 +847,15 @@ declare i32 @llvm.x86.avx.ptestnzc.256(<4 x i64>, <4 x i64>) nounwind readnone
 
 define i32 @test_x86_avx_ptestz_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_ptestz_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i1> [[TMP4]] to i4
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i4 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> [[A0:%.*]], <4 x i64> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a0, <4 x i64> %a1) ; <i32> [#uses=1]
@@ -866,10 +866,10 @@ declare i32 @llvm.x86.avx.ptestz.256(<4 x i64>, <4 x i64>) nounwind readnone
 
 define <8 x float> @test_x86_avx_rcp_ps_256(<8 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_rcp_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> [[A0:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[TMP1]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1]
@@ -880,7 +880,7 @@ declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>) nounwind readnone
 
 define <4 x double> @test_x86_avx_round_pd_256(<4 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_round_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
@@ -890,7 +890,7 @@ define <4 x double> @test_x86_avx_round_pd_256(<4 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 7) ; <<4 x double>> [#uses=1]
@@ -901,7 +901,7 @@ declare <4 x double> @llvm.x86.avx.round.pd.256(<4 x double>, i32) nounwind read
 
 define <8 x float> @test_x86_avx_round_ps_256(<8 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_round_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
@@ -911,7 +911,7 @@ define <8 x float> @test_x86_avx_round_ps_256(<8 x float> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 7) ; <<8 x float>> [#uses=1]
@@ -922,10 +922,10 @@ declare <8 x float> @llvm.x86.avx.round.ps.256(<8 x float>, i32) nounwind readno
 
 define <8 x float> @test_x86_avx_rsqrt_ps_256(<8 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_rsqrt_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> [[A0:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[TMP1]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %a0) ; <<8 x float>> [#uses=1]
@@ -935,8 +935,8 @@ declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>) nounwind readnone
 
 define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vpermilvar_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -949,7 +949,7 @@ define <2 x double> @test_x86_avx_vpermilvar_pd(<2 x double> %a0, <2 x i64> %a1)
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> [[A0:%.*]], <2 x i64> [[A1:%.*]])
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> %a1) ; <<2 x double>> [#uses=1]
@@ -960,8 +960,8 @@ declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>) nounwi
 
 define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vpermilvar_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -974,7 +974,7 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256(<4 x double> %a0, <4 x i64>
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> [[A0:%.*]], <4 x i64> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> %a1) ; <<4 x double>> [#uses=1]
@@ -984,7 +984,7 @@ declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>) no
 
 define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx_vpermilvar_pd_256_2(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
@@ -994,7 +994,7 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> [[A0:%.*]], <4 x i64> <i64 2, i64 0, i64 0, i64 2>)
-; CHECK-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 0, i64 2>) ; <<4 x double>> [#uses=1]
@@ -1003,8 +1003,8 @@ define <4 x double> @test_x86_avx_vpermilvar_pd_256_2(<4 x double> %a0) #0 {
 
 define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vpermilvar_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -1017,16 +1017,16 @@ define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) #
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> [[A0:%.*]], <4 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a1) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
-define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, <4 x i32>* %a1) #0 {
+define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, ptr %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_load(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
@@ -1034,11 +1034,11 @@ define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, <4 x i32>*
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[A2:%.*]] = load <4 x i32>, <4 x i32>* [[A1:%.*]], align 16
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <4 x i32>* [[A1]] to i64
+; CHECK-NEXT:    [[A2:%.*]] = load <4 x i32>, ptr [[A1:%.*]], align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[A1]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <4 x i32>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP7]], align 16
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP7]], align 16
 ; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
 ; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP8]], 0
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <4 x i32> [[_MSLD]] to i128
@@ -1050,10 +1050,10 @@ define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, <4 x i32>*
 ; CHECK-NEXT:    unreachable
 ; CHECK:       11:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> [[A0:%.*]], <4 x i32> [[A2]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
-  %a2 = load <4 x i32>, <4 x i32>* %a1
+  %a2 = load <4 x i32>, ptr %a1
   %res = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a2) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
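
The load above also exercises the application-to-shadow mapping, which is pure integer arithmetic and therefore untouched by the pointer-type conversion; 87960930222080 is 0x500000000000, the x86-64 Linux shadow offset. Sketched with illustrative names:

  %addr = ptrtoint ptr %p to i64
  %s = xor i64 %addr, 87960930222080   ; flip the address into the shadow region
  %sp = inttoptr i64 %s to ptr
  %shadow = load <4 x i32>, ptr %sp, align 16   ; mirrors the application load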
@@ -1062,8 +1062,8 @@ declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>) nounwind
 
 define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vpermilvar_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -1076,7 +1076,7 @@ define <8 x float> @test_x86_avx_vpermilvar_ps_256(<8 x float> %a0, <8 x i32> %a
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> [[A0:%.*]], <8 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %a1) ; <<8 x float>> [#uses=1]
@@ -1087,15 +1087,15 @@ declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>) noun
 
 define i32 @test_x86_avx_vtestc_pd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestc_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i1> [[TMP4]] to i2
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i2 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -1106,15 +1106,15 @@ declare i32 @llvm.x86.avx.vtestc.pd(<2 x double>, <2 x double>) nounwind readnon
 
 define i32 @test_x86_avx_vtestc_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestc_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i1> [[TMP4]] to i4
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i4 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %a0, <4 x double> %a1) ; <i32> [#uses=1]
@@ -1125,15 +1125,15 @@ declare i32 @llvm.x86.avx.vtestc.pd.256(<4 x double>, <4 x double>) nounwind rea
 
 define i32 @test_x86_avx_vtestc_ps(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestc_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i1> [[TMP4]] to i4
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i4 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -1144,15 +1144,15 @@ declare i32 @llvm.x86.avx.vtestc.ps(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_avx_vtestc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestc_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i1> [[TMP4]] to i8
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %a0, <8 x float> %a1) ; <i32> [#uses=1]
@@ -1163,15 +1163,15 @@ declare i32 @llvm.x86.avx.vtestc.ps.256(<8 x float>, <8 x float>) nounwind readn
 
 define i32 @test_x86_avx_vtestnzc_pd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestnzc_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i1> [[TMP4]] to i2
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i2 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestnzc.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestnzc.pd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -1182,15 +1182,15 @@ declare i32 @llvm.x86.avx.vtestnzc.pd(<2 x double>, <2 x double>) nounwind readn
 
 define i32 @test_x86_avx_vtestnzc_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestnzc_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i1> [[TMP4]] to i4
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i4 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double> %a0, <4 x double> %a1) ; <i32> [#uses=1]
@@ -1201,15 +1201,15 @@ declare i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double>, <4 x double>) nounwind r
 
 define i32 @test_x86_avx_vtestnzc_ps(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestnzc_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i1> [[TMP4]] to i4
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i4 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestnzc.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestnzc.ps(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -1220,15 +1220,15 @@ declare i32 @llvm.x86.avx.vtestnzc.ps(<4 x float>, <4 x float>) nounwind readnon
 
 define i32 @test_x86_avx_vtestnzc_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestnzc_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i1> [[TMP4]] to i8
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float> %a0, <8 x float> %a1) ; <i32> [#uses=1]
@@ -1239,15 +1239,15 @@ declare i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float>, <8 x float>) nounwind rea
 
 define i32 @test_x86_avx_vtestz_pd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestz_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i1> [[TMP4]] to i2
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i2 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -1258,15 +1258,15 @@ declare i32 @llvm.x86.avx.vtestz.pd(<2 x double>, <2 x double>) nounwind readnon
 
 define i32 @test_x86_avx_vtestz_pd_256(<4 x double> %a0, <4 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestz_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <4 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i1> [[TMP4]] to i4
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i4 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> [[A0:%.*]], <4 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %a0, <4 x double> %a1) ; <i32> [#uses=1]
@@ -1277,15 +1277,15 @@ declare i32 @llvm.x86.avx.vtestz.pd.256(<4 x double>, <4 x double>) nounwind rea
 
 define i32 @test_x86_avx_vtestz_ps(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestz_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i1> [[TMP4]] to i4
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i4 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestz.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestz.ps(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -1296,15 +1296,15 @@ declare i32 @llvm.x86.avx.vtestz.ps(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_avx_vtestz_ps_256(<8 x float> %a0, <8 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx_vtestz_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <8 x i32> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i1> [[TMP4]] to i8
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i8 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> [[A0:%.*]], <8 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> %a0, <8 x float> %a1) ; <i32> [#uses=1]
@@ -1336,99 +1336,96 @@ define void @test_x86_avx_vzeroupper() #0 {
 }
 declare void @llvm.x86.avx.vzeroupper() nounwind
 
-define void @movnt_dq(i8* %p, <2 x i64> %a1) nounwind #0 {
+define void @movnt_dq(ptr %p, <2 x i64> %a1) nounwind #0 {
 ; CHECK-LABEL: @movnt_dq(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[A2:%.*]] = add <2 x i64> [[A1:%.*]], <i64 1, i64 1>
 ; CHECK-NEXT:    [[_MSPROP1:%.*]] = shufflevector <2 x i64> [[_MSPROP]], <2 x i64> <i64 -1, i64 -1>, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[A3:%.*]] = shufflevector <2 x i64> [[A2]], <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
-; CHECK-NEXT:    [[CAST:%.*]] = bitcast i8* [[P:%.*]] to <4 x i64>*
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
 ; CHECK:       3:
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <4 x i64>* [[CAST]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <4 x i64>*
-; CHECK-NEXT:    store <4 x i64> [[_MSPROP1]], <4 x i64>* [[TMP7]], align 32
-; CHECK-NEXT:    store <4 x i64> [[A3]], <4 x i64>* [[CAST]], align 32, !nontemporal !1
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store <4 x i64> [[_MSPROP1]], ptr [[TMP7]], align 32
+; CHECK-NEXT:    store <4 x i64> [[A3]], ptr [[P]], align 32, !nontemporal !1
 ; CHECK-NEXT:    ret void
 ;
   %a2 = add <2 x i64> %a1, <i64 1, i64 1>
   %a3 = shufflevector <2 x i64> %a2, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
-  tail call void @llvm.x86.avx.movnt.dq.256(i8* %p, <4 x i64> %a3) nounwind
+  tail call void @llvm.x86.avx.movnt.dq.256(ptr %p, <4 x i64> %a3) nounwind
   ret void
 }
-declare void @llvm.x86.avx.movnt.dq.256(i8*, <4 x i64>) nounwind
+declare void @llvm.x86.avx.movnt.dq.256(ptr, <4 x i64>) nounwind
 
-define void @movnt_ps(i8* %p, <8 x float> %a) nounwind #0 {
+define void @movnt_ps(ptr %p, <8 x float> %a) nounwind #0 {
 ; CHECK-LABEL: @movnt_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[CAST:%.*]] = bitcast i8* [[P:%.*]] to <8 x float>*
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
 ; CHECK:       3:
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <8 x float>* [[CAST]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <8 x i32>*
-; CHECK-NEXT:    store <8 x i32> [[TMP2]], <8 x i32>* [[TMP7]], align 32
-; CHECK-NEXT:    store <8 x float> [[A:%.*]], <8 x float>* [[CAST]], align 32, !nontemporal !1
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store <8 x i32> [[TMP2]], ptr [[TMP7]], align 32
+; CHECK-NEXT:    store <8 x float> [[A:%.*]], ptr [[P]], align 32, !nontemporal !1
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.x86.avx.movnt.ps.256(i8* %p, <8 x float> %a) nounwind
+  tail call void @llvm.x86.avx.movnt.ps.256(ptr %p, <8 x float> %a) nounwind
   ret void
 }
-declare void @llvm.x86.avx.movnt.ps.256(i8*, <8 x float>) nounwind
+declare void @llvm.x86.avx.movnt.ps.256(ptr, <8 x float>) nounwind
 
-define void @movnt_pd(i8* %p, <4 x double> %a1) nounwind #0 {
+define void @movnt_pd(ptr %p, <4 x double> %a1) nounwind #0 {
   ; add operation forces the execution domain.
 ; CHECK-LABEL: @movnt_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[A2:%.*]] = fadd <4 x double> [[A1:%.*]], zeroinitializer
-; CHECK-NEXT:    [[CAST:%.*]] = bitcast i8* [[P:%.*]] to <4 x double>*
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
 ; CHECK:       3:
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR8]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <4 x double>* [[CAST]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <4 x i64>*
-; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], <4 x i64>* [[TMP7]], align 32
-; CHECK-NEXT:    store <4 x double> [[A2]], <4 x double>* [[CAST]], align 32, !nontemporal !1
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store <4 x i64> [[_MSPROP]], ptr [[TMP7]], align 32
+; CHECK-NEXT:    store <4 x double> [[A2]], ptr [[P]], align 32, !nontemporal !1
 ; CHECK-NEXT:    ret void
 ;
   %a2 = fadd <4 x double> %a1, <double 0x0, double 0x0, double 0x0, double 0x0>
-  tail call void @llvm.x86.avx.movnt.pd.256(i8* %p, <4 x double> %a2) nounwind
+  tail call void @llvm.x86.avx.movnt.pd.256(ptr %p, <4 x double> %a2) nounwind
   ret void
 }
-declare void @llvm.x86.avx.movnt.pd.256(i8*, <4 x double>) nounwind
+declare void @llvm.x86.avx.movnt.pd.256(ptr, <4 x double>) nounwind
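In the movnt tests the [[CAST]] lines are not rewritten, they disappear: pointer-to-pointer bitcasts are no-ops under opaque pointers, so both the shadow store and the !nontemporal store now take [[P]] directly. A minimal before/after sketch:

  ; typed pointers (removed):
  ;   %cast = bitcast i8* %p to <4 x i64>*
  ;   store <4 x i64> %a3, <4 x i64>* %cast, align 32, !nontemporal !1
  ; opaque pointers (kept):
  store <4 x i64> %a3, ptr %p, align 32, !nontemporal !1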
 
 
 define <2 x i64> @test_x86_pclmulqdq(<2 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_pclmulqdq(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <2 x i64> [[TMP3]], [[TMP4]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]], i8 0)
-; CHECK-NEXT:    store <2 x i64> [[_MSPROP]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %a0, <2 x i64> %a1, i8 0) ; <<2 x i64>> [#uses=1]

diff --git a/llvm/test/Instrumentation/MemorySanitizer/avx2-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/avx2-intrinsics-x86.ll
index 9ea9611c3bbc4..2759ca721f50d 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/avx2-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/avx2-intrinsics-x86.ll
@@ -6,8 +6,8 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_packssdw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -15,7 +15,7 @@ define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i32>
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> [[TMP4]], <8 x i32> [[TMP6]])
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP_VECTOR_PACK]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1]
@@ -29,7 +29,7 @@ define <16 x i16> @test_x86_avx2_packssdw_fold() #0 {
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP_VECTOR_PACK]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
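
The pack tests use a different shadow rule: each input shadow lane is collapsed to all-ones or all-zeros (icmp ne plus sext) and then run through the pack intrinsic itself, so saturation and lane routing apply to shadows exactly as to data. A sketch with illustrative names:

  %p0 = icmp ne <8 x i32> %shadow.a0, zeroinitializer
  %q0 = sext <8 x i1> %p0 to <8 x i32>          ; poisoned lane becomes all ones
  %p1 = icmp ne <8 x i32> %shadow.a1, zeroinitializer
  %q1 = sext <8 x i1> %p1 to <8 x i32>
  %sres = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %q0, <8 x i32> %q1)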
@@ -39,8 +39,8 @@ define <16 x i16> @test_x86_avx2_packssdw_fold() #0 {
 
 define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_packsswb(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i16>
@@ -48,7 +48,7 @@ define <32 x i8> @test_x86_avx2_packsswb(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i16>
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> [[TMP4]], <16 x i16> [[TMP6]])
 ; CHECK-NEXT:    [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <32 x i8> [[_MSPROP_VECTOR_PACK]], <32 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <32 x i8>*), align 8
+; CHECK-NEXT:    store <32 x i8> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <32 x i8> [[RES]]
 ;
   %res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
@@ -62,7 +62,7 @@ define <32 x i8> @test_x86_avx2_packsswb_fold() #0 {
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> zeroinitializer, <16 x i16> zeroinitializer)
 ; CHECK-NEXT:    [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> <i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
-; CHECK-NEXT:    store <32 x i8> [[_MSPROP_VECTOR_PACK]], <32 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <32 x i8>*), align 8
+; CHECK-NEXT:    store <32 x i8> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <32 x i8> [[RES]]
 ;
   %res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
@@ -72,8 +72,8 @@ define <32 x i8> @test_x86_avx2_packsswb_fold() #0 {
 
 define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_packuswb(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <16 x i16> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <16 x i1> [[TMP3]] to <16 x i16>
@@ -81,7 +81,7 @@ define <32 x i8> @test_x86_avx2_packuswb(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i16>
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> [[TMP4]], <16 x i16> [[TMP6]])
 ; CHECK-NEXT:    [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <32 x i8> [[_MSPROP_VECTOR_PACK]], <32 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <32 x i8>*), align 8
+; CHECK-NEXT:    store <32 x i8> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <32 x i8> [[RES]]
 ;
   %res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a0, <16 x i16> %a1) ; <<32 x i8>> [#uses=1]
@@ -95,7 +95,7 @@ define <32 x i8> @test_x86_avx2_packuswb_fold() #0 {
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> zeroinitializer, <16 x i16> zeroinitializer)
 ; CHECK-NEXT:    [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> <i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
-; CHECK-NEXT:    store <32 x i8> [[_MSPROP_VECTOR_PACK]], <32 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <32 x i8>*), align 8
+; CHECK-NEXT:    store <32 x i8> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <32 x i8> [[RES]]
 ;
   %res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
@@ -105,12 +105,12 @@ define <32 x i8> @test_x86_avx2_packuswb_fold() #0 {
 
 define <32 x i8> @test_x86_avx2_pavg_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pavg_b(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, <32 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <32 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]])
-; CHECK-NEXT:    store <32 x i8> [[_MSPROP]], <32 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <32 x i8>*), align 8
+; CHECK-NEXT:    store <32 x i8> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <32 x i8> [[RES]]
 ;
   %res = call <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -121,12 +121,12 @@ declare <32 x i8> @llvm.x86.avx2.pavg.b(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <16 x i16> @test_x86_avx2_pavg_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pavg_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -137,15 +137,15 @@ declare <16 x i16> @llvm.x86.avx2.pavg.w(<16 x i16>, <16 x i16>) nounwind readno
 
 define <8 x i32> @test_x86_avx2_pmadd_wd(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pmadd_wd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i16> [[TMP3]] to <8 x i32>
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne <8 x i32> [[TMP4]], zeroinitializer
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i32>
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[TMP6]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1) ; <<8 x i32>> [#uses=1]
@@ -156,17 +156,17 @@ declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readn
 
 define i32 @test_x86_avx2_pmovmskb(<32 x i8> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pmovmskb(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, <32 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <32 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <32 x i8> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0:![0-9]+]]
 ; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7:[0-9]+]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6:[0-9]+]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> [[A0:%.*]])
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %a0) ; <i32> [#uses=1]
@@ -177,12 +177,12 @@ declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>) nounwind readnone
 
 define <16 x i16> @test_x86_avx2_pmulh_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pmulh_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -193,12 +193,12 @@ declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readn
 
 define <16 x i16> @test_x86_avx2_pmulhu_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pmulhu_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -209,8 +209,8 @@ declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind read
 
 define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psad_bw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, <32 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <32 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <32 x i8> [[TMP3]] to <4 x i64>
@@ -218,7 +218,7 @@ define <4 x i64> @test_x86_avx2_psad_bw(<32 x i8> %a0, <32 x i8> %a1) #0 {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <4 x i1> [[TMP5]] to <4 x i64>
 ; CHECK-NEXT:    [[TMP7:%.*]] = lshr <4 x i64> [[TMP6]], <i64 48, i64 48, i64 48, i64 48>
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i64> [[TMP7]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[TMP7]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %res = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %a0, <32 x i8> %a1) ; <<4 x i64>> [#uses=1]
@@ -229,8 +229,8 @@ declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psll_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -240,7 +240,7 @@ define <8 x i32> @test_x86_avx2_psll_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> [[TMP1]], <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <8 x i32> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> [[A0:%.*]], <4 x i32> [[A1]])
-; CHECK-NEXT:    store <8 x i32> [[TMP9]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -251,8 +251,8 @@ declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
 
 define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psll_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -262,7 +262,7 @@ define <4 x i64> @test_x86_avx2_psll_q(<4 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> [[TMP1]], <2 x i64> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <4 x i64> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> [[A0:%.*]], <2 x i64> [[A1]])
-; CHECK-NEXT:    store <4 x i64> [[TMP9]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %res = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -273,8 +273,8 @@ declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone
 
 define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psll_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -284,7 +284,7 @@ define <16 x i16> @test_x86_avx2_psll_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> [[TMP1]], <8 x i16> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <16 x i16> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> [[A0:%.*]], <8 x i16> [[A1]])
-; CHECK-NEXT:    store <16 x i16> [[TMP9]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -295,12 +295,12 @@ declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnon
 
 define <8 x i32> @test_x86_avx2_pslli_d(<8 x i32> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pslli_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <8 x i32> [[TMP3]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
@@ -311,12 +311,12 @@ declare <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32>, i32) nounwind readnone
 
 define <4 x i64> @test_x86_avx2_pslli_q(<4 x i64> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pslli_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i64> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <4 x i64> [[TMP3]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %res = call <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
@@ -327,12 +327,12 @@ declare <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64>, i32) nounwind readnone
 
 define <16 x i16> @test_x86_avx2_pslli_w(<16 x i16> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pslli_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <16 x i16> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <16 x i16> [[TMP3]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
@@ -343,8 +343,8 @@ declare <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16>, i32) nounwind readnone
 
 define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psra_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -354,7 +354,7 @@ define <8 x i32> @test_x86_avx2_psra_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> [[TMP1]], <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <8 x i32> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> [[A0:%.*]], <4 x i32> [[A1]])
-; CHECK-NEXT:    store <8 x i32> [[TMP9]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -365,8 +365,8 @@ declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone
 
 define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psra_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -376,7 +376,7 @@ define <16 x i16> @test_x86_avx2_psra_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> [[TMP1]], <8 x i16> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <16 x i16> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> [[A0:%.*]], <8 x i16> [[A1]])
-; CHECK-NEXT:    store <16 x i16> [[TMP9]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -387,12 +387,12 @@ declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnon
 
 define <8 x i32> @test_x86_avx2_psrai_d(<8 x i32> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrai_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <8 x i32> [[TMP3]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
@@ -403,12 +403,12 @@ declare <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32>, i32) nounwind readnone
 
 define <16 x i16> @test_x86_avx2_psrai_w(<16 x i16> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrai_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <16 x i16> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <16 x i16> [[TMP3]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
@@ -419,8 +419,8 @@ declare <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16>, i32) nounwind readnone
 
 define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrl_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -430,7 +430,7 @@ define <8 x i32> @test_x86_avx2_psrl_d(<8 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> [[TMP1]], <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <8 x i32> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> [[A0:%.*]], <4 x i32> [[A1]])
-; CHECK-NEXT:    store <8 x i32> [[TMP9]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %a0, <4 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -441,8 +441,8 @@ declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone
 
 define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrl_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -452,7 +452,7 @@ define <4 x i64> @test_x86_avx2_psrl_q(<4 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> [[TMP1]], <2 x i64> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <4 x i64> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> [[A0:%.*]], <2 x i64> [[A1]])
-; CHECK-NEXT:    store <4 x i64> [[TMP9]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %res = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -463,8 +463,8 @@ declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone
 
 define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrl_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -474,7 +474,7 @@ define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> [[TMP1]], <8 x i16> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <16 x i16> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> [[A0:%.*]], <8 x i16> [[A1]])
-; CHECK-NEXT:    store <16 x i16> [[TMP9]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -483,22 +483,22 @@ define <16 x i16> @test_x86_avx2_psrl_w(<16 x i16> %a0, <8 x i16> %a1) #0 {
 declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnone
 
 
-define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, <8 x i16>* %p) #0 {
+define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, ptr %p) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrl_w_load(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to i64*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
 ; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[A1:%.*]] = load <8 x i16>, <8 x i16>* [[P:%.*]], align 16
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <8 x i16>* [[P]] to i64
+; CHECK-NEXT:    [[A1:%.*]] = load <8 x i16>, ptr [[P:%.*]], align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <8 x i16>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <8 x i16>, <8 x i16>* [[TMP7]], align 16
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <8 x i16>, ptr [[TMP7]], align 16
 ; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x i16> [[_MSLD]] to i128
 ; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i64
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ne i64 [[TMP9]], 0
@@ -507,10 +507,10 @@ define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, <8 x i16>* %p) #0 {
 ; CHECK-NEXT:    [[TMP13:%.*]] = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> [[TMP2]], <8 x i16> [[A1]])
 ; CHECK-NEXT:    [[TMP14:%.*]] = or <16 x i16> [[TMP13]], [[TMP12]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> [[A0:%.*]], <8 x i16> [[A1]])
-; CHECK-NEXT:    store <16 x i16> [[TMP14]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[TMP14]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
-  %a1 = load <8 x i16>, <8 x i16>* %p
+  %a1 = load <8 x i16>, ptr %p
   %res = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
@@ -518,12 +518,12 @@ define <16 x i16> @test_x86_avx2_psrl_w_load(<16 x i16> %a0, <8 x i16>* %p) #0 {
 
 define <8 x i32> @test_x86_avx2_psrli_d(<8 x i32> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrli_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <8 x i32> [[TMP3]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %a0, i32 7) ; <<8 x i32>> [#uses=1]
@@ -534,12 +534,12 @@ declare <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32>, i32) nounwind readnone
 
 define <4 x i64> @test_x86_avx2_psrli_q(<4 x i64> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrli_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i64> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <4 x i64> [[TMP3]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %res = call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
@@ -550,12 +550,12 @@ declare <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64>, i32) nounwind readnone
 
 define <16 x i16> @test_x86_avx2_psrli_w(<16 x i16> %a0) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrli_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <16 x i16> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <16 x i16> [[TMP3]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %a0, i32 7) ; <<16 x i16>> [#uses=1]
@@ -566,12 +566,12 @@ declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) nounwind readnone
 
 define <8 x i32> @test_x86_avx2_phadd_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_phadd_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -582,12 +582,12 @@ declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone
 
 define <16 x i16> @test_x86_avx2_phadd_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_phadd_sw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -598,12 +598,12 @@ declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind read
 
 define <16 x i16> @test_x86_avx2_phadd_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_phadd_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -614,12 +614,12 @@ declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readn
 
 define <8 x i32> @test_x86_avx2_phsub_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_phsub_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -630,12 +630,12 @@ declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone
 
 define <16 x i16> @test_x86_avx2_phsub_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_phsub_sw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -646,12 +646,12 @@ declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind read
 
 define <16 x i16> @test_x86_avx2_phsub_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_phsub_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -662,15 +662,15 @@ declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readn
 
 define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, <32 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <32 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <32 x i8> [[TMP3]] to <16 x i16>
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne <16 x i16> [[TMP4]], zeroinitializer
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <16 x i1> [[TMP5]] to <16 x i16>
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[TMP6]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1) ; <<16 x i16>> [#uses=1]
@@ -678,43 +678,43 @@ define <16 x i16> @test_x86_avx2_pmadd_ub_sw(<32 x i8> %a0, <32 x i8> %a1) #0 {
 }
 declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind readnone
 
-define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(<32 x i8>* %ptr, <32 x i8> %a1) #0 {
+define <16 x i16> @test_x86_avx2_pmadd_ub_sw_load_op0(ptr %ptr, <32 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pmadd_ub_sw_load_op0(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <32 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
 ; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[A0:%.*]] = load <32 x i8>, <32 x i8>* [[PTR:%.*]], align 32
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <32 x i8>* [[PTR]] to i64
+; CHECK-NEXT:    [[A0:%.*]] = load <32 x i8>, ptr [[PTR:%.*]], align 32
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[PTR]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <32 x i8>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <32 x i8>, <32 x i8>* [[TMP7]], align 32
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP7]], align 32
 ; CHECK-NEXT:    [[TMP8:%.*]] = or <32 x i8> [[_MSLD]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <32 x i8> [[TMP8]] to <16 x i16>
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ne <16 x i16> [[TMP9]], zeroinitializer
 ; CHECK-NEXT:    [[TMP11:%.*]] = sext <16 x i1> [[TMP10]] to <16 x i16>
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> [[A0]], <32 x i8> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[TMP11]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[TMP11]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
-  %a0 = load <32 x i8>, <32 x i8>* %ptr
+  %a0 = load <32 x i8>, ptr %ptr
   %res = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
 define <16 x i16> @test_x86_avx2_pmul_hr_sw(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pmul_hr_sw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -725,12 +725,12 @@ declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind re
 
 define <32 x i8> @test_x86_avx2_pshuf_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pshuf_b(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, <32 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <32 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]])
-; CHECK-NEXT:    store <32 x i8> [[_MSPROP]], <32 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <32 x i8>*), align 8
+; CHECK-NEXT:    store <32 x i8> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <32 x i8> [[RES]]
 ;
   %res = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -741,12 +741,12 @@ declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <32 x i8> @test_x86_avx2_psign_b(<32 x i8> %a0, <32 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psign_b(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, <32 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <32 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]])
-; CHECK-NEXT:    store <32 x i8> [[_MSPROP]], <32 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <32 x i8>*), align 8
+; CHECK-NEXT:    store <32 x i8> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <32 x i8> [[RES]]
 ;
   %res = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %a0, <32 x i8> %a1) ; <<32 x i8>> [#uses=1]
@@ -757,12 +757,12 @@ declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <8 x i32> @test_x86_avx2_psign_d(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psign_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %a0, <8 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -773,12 +773,12 @@ declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone
 
 define <16 x i16> @test_x86_avx2_psign_w(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psign_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %a0, <16 x i16> %a1) ; <<16 x i16>> [#uses=1]
@@ -789,8 +789,8 @@ declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readn
 
 define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_mpsadbw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, <32 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <32 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <32 x i8> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -799,11 +799,11 @@ define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) #0 {
 ; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
 ; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
 ; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <16 x i16> zeroinitializer, <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7) ; <<16 x i16>> [#uses=1]
@@ -811,22 +811,22 @@ define <16 x i16> @test_x86_avx2_mpsadbw(<32 x i8> %a0, <32 x i8> %a1) #0 {
 }
 declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind readnone
 
-define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(<32 x i8>* %ptr, <32 x i8> %a1) #0 {
+define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(ptr %ptr, <32 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_mpsadbw_load_op0(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <32 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
 ; CHECK:       3:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[A0:%.*]] = load <32 x i8>, <32 x i8>* [[PTR:%.*]], align 32
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <32 x i8>* [[PTR]] to i64
+; CHECK-NEXT:    [[A0:%.*]] = load <32 x i8>, ptr [[PTR:%.*]], align 32
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[PTR]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <32 x i8>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <32 x i8>, <32 x i8>* [[TMP7]], align 32
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP7]], align 32
 ; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <32 x i8> [[_MSLD]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP8]], 0
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <32 x i8> [[TMP2]] to i256
@@ -834,22 +834,22 @@ define <16 x i16> @test_x86_avx2_mpsadbw_load_op0(<32 x i8>* %ptr, <32 x i8> %a1
 ; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
 ; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
 ; CHECK:       10:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       11:
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> [[A0]], <32 x i8> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <16 x i16> zeroinitializer, <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
-  %a0 = load <32 x i8>, <32 x i8>* %ptr
+  %a0 = load <32 x i8>, ptr %ptr
   %res = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7) ; <<16 x i16>> [#uses=1]
   ret <16 x i16> %res
 }
 
 define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_packusdw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
@@ -857,7 +857,7 @@ define <16 x i16> @test_x86_avx2_packusdw(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i32>
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> [[TMP4]], <8 x i32> [[TMP6]])
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP_VECTOR_PACK]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1) ; <<16 x i16>> [#uses=1]
@@ -871,7 +871,7 @@ define <16 x i16> @test_x86_avx2_packusdw_fold() #0 {
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP_VECTOR_PACK]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[RES]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
@@ -881,14 +881,14 @@ define <16 x i16> @test_x86_avx2_packusdw_fold() #0 {
 
 define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pblendvb(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, <32 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <32 x i8>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 64) to <32 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <32 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <32 x i8> [[_MSPROP]], [[TMP3]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> [[A0:%.*]], <32 x i8> [[A1:%.*]], <32 x i8> [[A2:%.*]])
-; CHECK-NEXT:    store <32 x i8> [[_MSPROP1]], <32 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <32 x i8>*), align 8
+; CHECK-NEXT:    store <32 x i8> [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <32 x i8> [[RES]]
 ;
   %res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) ; <<32 x i8>> [#uses=1]
@@ -899,12 +899,12 @@ declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounw
 
 define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pblendw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, <16 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = shufflevector <16 x i16> [[TMP1]], <16 x i16> [[TMP2]], <16 x i32> <i32 16, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 15>
 ; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <16 x i16> [[A0:%.*]], <16 x i16> [[A1:%.*]], <16 x i32> <i32 16, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], <16 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i16>*), align 8
+; CHECK-NEXT:    store <16 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i16> [[TMP3]]
 ;
   %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i8 7) ; <<16 x i16>> [#uses=1]
@@ -915,12 +915,12 @@ declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind r
 
 define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pblendd_128(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> <i32 4, i32 5, i32 6, i32 3>
 ; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x i32> [[A0:%.*]], <4 x i32> [[A1:%.*]], <4 x i32> <i32 4, i32 5, i32 6, i32 3>
-; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[TMP3]]
 ;
   %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i8 7) ; <<4 x i32>> [#uses=1]
@@ -931,12 +931,12 @@ declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind
 
 define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_pblendd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
 ; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]], <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[TMP3]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i8 7) ; <<8 x i32>> [#uses=1]
@@ -947,12 +947,12 @@ declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind
 
 define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_permd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> [[A0:%.*]], <8 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -963,8 +963,8 @@ declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
 
 define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_permps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP3]], 0
@@ -973,11 +973,11 @@ define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
 ; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
 ; CHECK:       5:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx2.permps(<8 x float> [[A0:%.*]], <8 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> %a1) ; <<8 x float>> [#uses=1]
@@ -986,10 +986,10 @@ define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x i32> %a1) #0 {
 declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind readonly
 
 
-define <2 x i64> @test_x86_avx2_maskload_q(i8* %a0, <2 x i64> %a1) #0 {
+define <2 x i64> @test_x86_avx2_maskload_q(ptr %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_maskload_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -997,23 +997,23 @@ define <2 x i64> @test_x86_avx2_maskload_q(i8* %a0, <2 x i64> %a1) #0 {
 ; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
 ; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
 ; CHECK:       4:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       5:
-; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.maskload.q(i8* [[A0:%.*]], <2 x i64> [[A1:%.*]])
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.maskload.q(ptr [[A0:%.*]], <2 x i64> [[A1:%.*]])
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
-  %res = call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
+  %res = call <2 x i64> @llvm.x86.avx2.maskload.q(ptr %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
   ret <2 x i64> %res
 }
-declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>) nounwind readonly
+declare <2 x i64> @llvm.x86.avx2.maskload.q(ptr, <2 x i64>) nounwind readonly
 
 
-define <4 x i64> @test_x86_avx2_maskload_q_256(i8* %a0, <4 x i64> %a1) #0 {
+define <4 x i64> @test_x86_avx2_maskload_q_256(ptr %a0, <4 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_maskload_q_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -1021,23 +1021,23 @@ define <4 x i64> @test_x86_avx2_maskload_q_256(i8* %a0, <4 x i64> %a1) #0 {
 ; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
 ; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
 ; CHECK:       4:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       5:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* [[A0:%.*]], <4 x i64> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr [[A0:%.*]], <4 x i64> [[A1:%.*]])
+; CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
-  %res = call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
+  %res = call <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
   ret <4 x i64> %res
 }
-declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>) nounwind readonly
+declare <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr, <4 x i64>) nounwind readonly
 
 
-define <4 x i32> @test_x86_avx2_maskload_d(i8* %a0, <4 x i32> %a1) #0 {
+define <4 x i32> @test_x86_avx2_maskload_d(ptr %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_maskload_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -1045,23 +1045,23 @@ define <4 x i32> @test_x86_avx2_maskload_d(i8* %a0, <4 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
 ; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
 ; CHECK:       4:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       5:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.maskload.d(i8* [[A0:%.*]], <4 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.maskload.d(ptr [[A0:%.*]], <4 x i32> [[A1:%.*]])
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
-  %res = call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
+  %res = call <4 x i32> @llvm.x86.avx2.maskload.d(ptr %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
   ret <4 x i32> %res
 }
-declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>) nounwind readonly
+declare <4 x i32> @llvm.x86.avx2.maskload.d(ptr, <4 x i32>) nounwind readonly
 
 
-define <8 x i32> @test_x86_avx2_maskload_d_256(i8* %a0, <8 x i32> %a1) #0 {
+define <8 x i32> @test_x86_avx2_maskload_d_256(ptr %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_maskload_d_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -1069,24 +1069,24 @@ define <8 x i32> @test_x86_avx2_maskload_d_256(i8* %a0, <8 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
 ; CHECK-NEXT:    br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
 ; CHECK:       4:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       5:
-; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* [[A0:%.*]], <8 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.maskload.d.256(ptr [[A0:%.*]], <8 x i32> [[A1:%.*]])
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
-  %res = call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
+  %res = call <8 x i32> @llvm.x86.avx2.maskload.d.256(ptr %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
   ret <8 x i32> %res
 }
-declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>) nounwind readonly
+declare <8 x i32> @llvm.x86.avx2.maskload.d.256(ptr, <8 x i32>) nounwind readonly
 
 
-define void @test_x86_avx2_maskstore_q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2) #0 {
+define void @test_x86_avx2_maskstore_q(ptr %a0, <2 x i64> %a1, <2 x i64> %a2) #0 {
 ; CHECK-LABEL: @test_x86_avx2_maskstore_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
@@ -1097,23 +1097,23 @@ define void @test_x86_avx2_maskstore_q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2) #0
 ; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
 ; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
 ; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       7:
-; CHECK-NEXT:    call void @llvm.x86.avx2.maskstore.q(i8* [[A0:%.*]], <2 x i64> [[A1:%.*]], <2 x i64> [[A2:%.*]])
+; CHECK-NEXT:    call void @llvm.x86.avx2.maskstore.q(ptr [[A0:%.*]], <2 x i64> [[A1:%.*]], <2 x i64> [[A2:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.x86.avx2.maskstore.q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2)
+  call void @llvm.x86.avx2.maskstore.q(ptr %a0, <2 x i64> %a1, <2 x i64> %a2)
   ret void
 }
-declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>) nounwind
+declare void @llvm.x86.avx2.maskstore.q(ptr, <2 x i64>, <2 x i64>) nounwind
 
 
-define void @test_x86_avx2_maskstore_q_256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2) #0 {
+define void @test_x86_avx2_maskstore_q_256(ptr %a0, <4 x i64> %a1, <4 x i64> %a2) #0 {
 ; CHECK-LABEL: @test_x86_avx2_maskstore_q_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i64> [[TMP2]] to i256
@@ -1124,23 +1124,23 @@ define void @test_x86_avx2_maskstore_q_256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2
 ; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
 ; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
 ; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       7:
-; CHECK-NEXT:    call void @llvm.x86.avx2.maskstore.q.256(i8* [[A0:%.*]], <4 x i64> [[A1:%.*]], <4 x i64> [[A2:%.*]])
+; CHECK-NEXT:    call void @llvm.x86.avx2.maskstore.q.256(ptr [[A0:%.*]], <4 x i64> [[A1:%.*]], <4 x i64> [[A2:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.x86.avx2.maskstore.q.256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2)
+  call void @llvm.x86.avx2.maskstore.q.256(ptr %a0, <4 x i64> %a1, <4 x i64> %a2)
   ret void
 }
-declare void @llvm.x86.avx2.maskstore.q.256(i8*, <4 x i64>, <4 x i64>) nounwind
+declare void @llvm.x86.avx2.maskstore.q.256(ptr, <4 x i64>, <4 x i64>) nounwind
 
 
-define void @test_x86_avx2_maskstore_d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2) #0 {
+define void @test_x86_avx2_maskstore_d(ptr %a0, <4 x i32> %a1, <4 x i32> %a2) #0 {
 ; CHECK-LABEL: @test_x86_avx2_maskstore_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
@@ -1151,23 +1151,23 @@ define void @test_x86_avx2_maskstore_d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2) #0
 ; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
 ; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
 ; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       7:
-; CHECK-NEXT:    call void @llvm.x86.avx2.maskstore.d(i8* [[A0:%.*]], <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]])
+; CHECK-NEXT:    call void @llvm.x86.avx2.maskstore.d(ptr [[A0:%.*]], <4 x i32> [[A1:%.*]], <4 x i32> [[A2:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.x86.avx2.maskstore.d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2)
+  call void @llvm.x86.avx2.maskstore.d(ptr %a0, <4 x i32> %a1, <4 x i32> %a2)
   ret void
 }
-declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>) nounwind
+declare void @llvm.x86.avx2.maskstore.d(ptr, <4 x i32>, <4 x i32>) nounwind
 
 
-define void @test_x86_avx2_maskstore_d_256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2) #0 {
+define void @test_x86_avx2_maskstore_d_256(ptr %a0, <8 x i32> %a1, <8 x i32> %a2) #0 {
 ; CHECK-LABEL: @test_x86_avx2_maskstore_d_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
@@ -1178,29 +1178,29 @@ define void @test_x86_avx2_maskstore_d_256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2
 ; CHECK-NEXT:    [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
 ; CHECK-NEXT:    br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
 ; CHECK:       6:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       7:
-; CHECK-NEXT:    call void @llvm.x86.avx2.maskstore.d.256(i8* [[A0:%.*]], <8 x i32> [[A1:%.*]], <8 x i32> [[A2:%.*]])
+; CHECK-NEXT:    call void @llvm.x86.avx2.maskstore.d.256(ptr [[A0:%.*]], <8 x i32> [[A1:%.*]], <8 x i32> [[A2:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.x86.avx2.maskstore.d.256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2)
+  call void @llvm.x86.avx2.maskstore.d.256(ptr %a0, <8 x i32> %a1, <8 x i32> %a2)
   ret void
 }
-declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>) nounwind
+declare void @llvm.x86.avx2.maskstore.d.256(ptr, <8 x i32>, <8 x i32>) nounwind
 
 
 define <4 x i32> @test_x86_avx2_psllv_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psllv_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> [[TMP1]], <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = or <4 x i32> [[TMP5]], [[TMP4]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> [[A0:%.*]], <4 x i32> [[A1]])
-; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1218,7 +1218,7 @@ define <4 x i32> @test_x86_avx2_psllv_d_const() #0 {
 ; CHECK-NEXT:    [[RES1:%.*]] = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> <i32 1, i32 1, i32 1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 -1>)
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i32> [[TMP2]], [[TMP4]]
 ; CHECK-NEXT:    [[RES2:%.*]] = add <4 x i32> [[RES0]], [[RES1]]
-; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES2]]
 ;
   %res0 = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> <i32 2, i32 9, i32 0, i32 -1>, <4 x i32> <i32 1, i32 0, i32 33, i32 -1>)
@@ -1231,15 +1231,15 @@ declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <8 x i32> @test_x86_avx2_psllv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psllv_d_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> [[TMP1]], <8 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = or <8 x i32> [[TMP5]], [[TMP4]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> [[A0:%.*]], <8 x i32> [[A1]])
-; CHECK-NEXT:    store <8 x i32> [[TMP6]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1257,7 +1257,7 @@ define <8 x i32> @test_x86_avx2_psllv_d_256_const() #0 {
 ; CHECK-NEXT:    [[RES1:%.*]] = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 -1>, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 -1>)
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP2]], [[TMP4]]
 ; CHECK-NEXT:    [[RES2:%.*]] = add <8 x i32> [[RES0]], [[RES1]]
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES2]]
 ;
   %res0 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0>, <8 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2>)
@@ -1270,15 +1270,15 @@ declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind read
 
 define <2 x i64> @test_x86_avx2_psllv_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psllv_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> [[TMP1]], <2 x i64> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = or <2 x i64> [[TMP5]], [[TMP4]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> [[A0:%.*]], <2 x i64> [[A1]])
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1290,7 +1290,7 @@ define <2 x i64> @test_x86_avx2_psllv_q_const() #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> zeroinitializer, <2 x i64> <i64 1, i64 -1>)
 ; CHECK-NEXT:    [[TMP2:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> <i64 4, i64 -1>, <2 x i64> <i64 1, i64 -1>)
-; CHECK-NEXT:    store <2 x i64> [[TMP2]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP2]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> <i64 4, i64 -1>, <2 x i64> <i64 1, i64 -1>)
@@ -1301,15 +1301,15 @@ declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <4 x i64> @test_x86_avx2_psllv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psllv_q_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i64>
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> [[TMP1]], <4 x i64> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = or <4 x i64> [[TMP5]], [[TMP4]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> [[A0:%.*]], <4 x i64> [[A1]])
-; CHECK-NEXT:    store <4 x i64> [[TMP6]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -1322,7 +1322,7 @@ define <4 x i64> @test_x86_avx2_psllv_q_256_const() #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> zeroinitializer, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
 ; CHECK-NEXT:    [[TMP2:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 -1>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
-; CHECK-NEXT:    store <4 x i64> [[TMP2]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[TMP2]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 -1>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
@@ -1333,15 +1333,15 @@ declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind read
 
 define <4 x i32> @test_x86_avx2_psrlv_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrlv_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> [[TMP1]], <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = or <4 x i32> [[TMP5]], [[TMP4]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> [[A0:%.*]], <4 x i32> [[A1]])
-; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1359,7 +1359,7 @@ define <4 x i32> @test_x86_avx2_psrlv_d_const() #0 {
 ; CHECK-NEXT:    [[RES1:%.*]] = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> <i32 4, i32 4, i32 4, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 -1>)
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i32> [[TMP2]], [[TMP4]]
 ; CHECK-NEXT:    [[RES2:%.*]] = add <4 x i32> [[RES0]], [[RES1]]
-; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES2]]
 ;
   %res0 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> <i32 2, i32 9, i32 0, i32 -1>, <4 x i32> <i32 1, i32 0, i32 33, i32 -1>)
@@ -1372,15 +1372,15 @@ declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <8 x i32> @test_x86_avx2_psrlv_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrlv_d_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> [[TMP1]], <8 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = or <8 x i32> [[TMP5]], [[TMP4]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> [[A0:%.*]], <8 x i32> [[A1]])
-; CHECK-NEXT:    store <8 x i32> [[TMP6]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1398,7 +1398,7 @@ define <8 x i32> @test_x86_avx2_psrlv_d_256_const() #0 {
 ; CHECK-NEXT:    [[RES1:%.*]] = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 -1>, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 -1>)
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i32> [[TMP2]], [[TMP4]]
 ; CHECK-NEXT:    [[RES2:%.*]] = add <8 x i32> [[RES0]], [[RES1]]
-; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES2]]
 ;
   %res0 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0>, <8 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2>)
@@ -1411,15 +1411,15 @@ declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind read
 
 define <2 x i64> @test_x86_avx2_psrlv_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrlv_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <2 x i64> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> [[TMP1]], <2 x i64> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = or <2 x i64> [[TMP5]], [[TMP4]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> [[A0:%.*]], <2 x i64> [[A1]])
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1432,7 +1432,7 @@ define <2 x i64> @test_x86_avx2_psrlv_q_const() #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> zeroinitializer, <2 x i64> <i64 1, i64 -1>)
 ; CHECK-NEXT:    [[TMP2:%.*]] = or <2 x i64> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> <i64 4, i64 4>, <2 x i64> <i64 1, i64 -1>)
-; CHECK-NEXT:    store <2 x i64> [[TMP2]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP2]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> <i64 4, i64 4>, <2 x i64> <i64 1, i64 -1>)
@@ -1443,15 +1443,15 @@ declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <4 x i64> @test_x86_avx2_psrlv_q_256(<4 x i64> %a0, <4 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrlv_q_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <4 x i64> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i64>
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> [[TMP1]], <4 x i64> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = or <4 x i64> [[TMP5]], [[TMP4]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> [[A0:%.*]], <4 x i64> [[A1]])
-; CHECK-NEXT:    store <4 x i64> [[TMP6]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1) ; <<4 x i64>> [#uses=1]
@@ -1465,7 +1465,7 @@ define <4 x i64> @test_x86_avx2_psrlv_q_256_const() #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> zeroinitializer, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
 ; CHECK-NEXT:    [[TMP2:%.*]] = or <4 x i64> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 4>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
-; CHECK-NEXT:    store <4 x i64> [[TMP2]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    store <4 x i64> [[TMP2]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 4>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
@@ -1476,15 +1476,15 @@ declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind read
 
 define <4 x i32> @test_x86_avx2_psrav_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrav_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> [[TMP1]], <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = or <4 x i32> [[TMP5]], [[TMP4]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> [[A0:%.*]], <4 x i32> [[A1]])
-; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1497,7 +1497,7 @@ define <4 x i32> @test_x86_avx2_psrav_d_const() #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 18, i32 35, i32 52>)
 ; CHECK-NEXT:    [[TMP2:%.*]] = or <4 x i32> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> <i32 2, i32 9, i32 -12, i32 23>, <4 x i32> <i32 1, i32 18, i32 35, i32 52>)
-; CHECK-NEXT:    store <4 x i32> [[TMP2]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP2]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> <i32 2, i32 9, i32 -12, i32 23>, <4 x i32> <i32 1, i32 18, i32 35, i32 52>)
@@ -1507,15 +1507,15 @@ declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <8 x i32> @test_x86_avx2_psrav_d_256(<8 x i32> %a0, <8 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_avx2_psrav_d_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <8 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i32>
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> [[TMP1]], <8 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = or <8 x i32> [[TMP5]], [[TMP4]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> [[A0:%.*]], <8 x i32> [[A1]])
-; CHECK-NEXT:    store <8 x i32> [[TMP6]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
@@ -1528,7 +1528,7 @@ define <8 x i32> @test_x86_avx2_psrav_d_256_const() #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>)
 ; CHECK-NEXT:    [[TMP2:%.*]] = or <8 x i32> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>)
-; CHECK-NEXT:    store <8 x i32> [[TMP2]], <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    store <8 x i32> [[TMP2]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>)
@@ -1536,12 +1536,12 @@ define <8 x i32> @test_x86_avx2_psrav_d_256_const() #0 {
 }
 declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind readnone
 
-define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, i8* %a1, <4 x i32> %idx, <2 x double> %mask) #0 {
+define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, ptr %a1, <4 x i32> %idx, <2 x double> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_d_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1555,26 +1555,26 @@ define <2 x double> @test_x86_avx2_gather_d_pd(<2 x double> %a0, i8* %a1, <4 x i
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> [[A0:%.*]], i8* [[A1:%.*]], <4 x i32> [[IDX:%.*]], <2 x double> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <2 x double> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %a0,
-  i8* %a1, <4 x i32> %idx, <2 x double> %mask, i8 2) ;
+  ptr %a1, <4 x i32> %idx, <2 x double> %mask, i8 2) ;
   ret <2 x double> %res
 }
-declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*,
+declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, ptr,
   <4 x i32>, <2 x double>, i8) nounwind readonly
 
-define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, i8* %a1, <4 x i32> %idx, <4 x double> %mask) #0 {
+define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, ptr %a1, <4 x i32> %idx, <4 x double> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_d_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1588,26 +1588,26 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(<4 x double> %a0, i8* %a1, <4
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> [[A0:%.*]], i8* [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x double> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x double> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %a0,
-  i8* %a1, <4 x i32> %idx, <4 x double> %mask, i8 2) ;
+  ptr %a1, <4 x i32> %idx, <4 x double> %mask, i8 2) ;
   ret <4 x double> %res
 }
-declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*,
+declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, ptr,
   <4 x i32>, <4 x double>, i8) nounwind readonly
 
-define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, i8* %a1, <2 x i64> %idx, <2 x double> %mask) #0 {
+define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, ptr %a1, <2 x i64> %idx, <2 x double> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_q_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1621,26 +1621,26 @@ define <2 x double> @test_x86_avx2_gather_q_pd(<2 x double> %a0, i8* %a1, <2 x i
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> [[A0:%.*]], i8* [[A1:%.*]], <2 x i64> [[IDX:%.*]], <2 x double> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> [[A0:%.*]], ptr [[A1:%.*]], <2 x i64> [[IDX:%.*]], <2 x double> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> %a0,
-  i8* %a1, <2 x i64> %idx, <2 x double> %mask, i8 2) ;
+  ptr %a1, <2 x i64> %idx, <2 x double> %mask, i8 2) ;
   ret <2 x double> %res
 }
-declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, i8*,
+declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, ptr,
   <2 x i64>, <2 x double>, i8) nounwind readonly
 
-define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, i8* %a1, <4 x i64> %idx, <4 x double> %mask) #0 {
+define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, ptr %a1, <4 x i64> %idx, <4 x double> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_q_pd_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 72) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1654,26 +1654,26 @@ define <4 x double> @test_x86_avx2_gather_q_pd_256(<4 x double> %a0, i8* %a1, <4
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> [[A0:%.*]], i8* [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x double> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> [[A0:%.*]], ptr [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x double> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   %res = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %a0,
-  i8* %a1, <4 x i64> %idx, <4 x double> %mask, i8 2) ;
+  ptr %a1, <4 x i64> %idx, <4 x double> %mask, i8 2) ;
   ret <4 x double> %res
 }
-declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, i8*,
+declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, ptr,
   <4 x i64>, <4 x double>, i8) nounwind readonly
 
-define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, i8* %a1, <4 x i32> %idx, <4 x float> %mask) #0 {
+define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, ptr %a1, <4 x i32> %idx, <4 x float> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_d_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1687,26 +1687,26 @@ define <4 x float> @test_x86_avx2_gather_d_ps(<4 x float> %a0, i8* %a1, <4 x i32
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> [[A0:%.*]], i8* [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x float> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x float> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %a0,
-  i8* %a1, <4 x i32> %idx, <4 x float> %mask, i8 2) ;
+  ptr %a1, <4 x i32> %idx, <4 x float> %mask, i8 2) ;
   ret <4 x float> %res
 }
-declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*,
+declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, ptr,
   <4 x i32>, <4 x float>, i8) nounwind readonly
 
-define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, i8* %a1, <8 x i32> %idx, <8 x float> %mask) #0 {
+define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, ptr %a1, <8 x i32> %idx, <8 x float> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_d_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 72) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1720,26 +1720,26 @@ define <8 x float> @test_x86_avx2_gather_d_ps_256(<8 x float> %a0, i8* %a1, <8 x
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> [[A0:%.*]], i8* [[A1:%.*]], <8 x i32> [[IDX:%.*]], <8 x float> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> [[A0:%.*]], ptr [[A1:%.*]], <8 x i32> [[IDX:%.*]], <8 x float> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
   %res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0,
-  i8* %a1, <8 x i32> %idx, <8 x float> %mask, i8 2) ;
+  ptr %a1, <8 x i32> %idx, <8 x float> %mask, i8 2) ;
   ret <8 x float> %res
 }
-declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*,
+declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, ptr,
   <8 x i32>, <8 x float>, i8) nounwind readonly
 
-define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, i8* %a1, <2 x i64> %idx, <4 x float> %mask) #0 {
+define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, ptr %a1, <2 x i64> %idx, <4 x float> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_q_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1753,26 +1753,26 @@ define <4 x float> @test_x86_avx2_gather_q_ps(<4 x float> %a0, i8* %a1, <2 x i64
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> [[A0:%.*]], i8* [[A1:%.*]], <2 x i64> [[IDX:%.*]], <4 x float> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> [[A0:%.*]], ptr [[A1:%.*]], <2 x i64> [[IDX:%.*]], <4 x float> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> %a0,
-  i8* %a1, <2 x i64> %idx, <4 x float> %mask, i8 2) ;
+  ptr %a1, <2 x i64> %idx, <4 x float> %mask, i8 2) ;
   ret <4 x float> %res
 }
-declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, i8*,
+declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, ptr,
   <2 x i64>, <4 x float>, i8) nounwind readonly
 
-define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, i8* %a1, <4 x i64> %idx, <4 x float> %mask) #0 {
+define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, ptr %a1, <4 x i64> %idx, <4 x float> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_q_ps_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1786,26 +1786,26 @@ define <4 x float> @test_x86_avx2_gather_q_ps_256(<4 x float> %a0, i8* %a1, <4 x
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> [[A0:%.*]], i8* [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x float> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> [[A0:%.*]], ptr [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x float> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %a0,
-  i8* %a1, <4 x i64> %idx, <4 x float> %mask, i8 2) ;
+  ptr %a1, <4 x i64> %idx, <4 x float> %mask, i8 2) ;
   ret <4 x float> %res
 }
-declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, i8*,
+declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, ptr,
   <4 x i64>, <4 x float>, i8) nounwind readonly
 
-define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, i8* %a1, <4 x i32> %idx, <2 x i64> %mask) #0 {
+define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, ptr %a1, <4 x i32> %idx, <2 x i64> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_d_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1819,26 +1819,26 @@ define <2 x i64> @test_x86_avx2_gather_d_q(<2 x i64> %a0, i8* %a1, <4 x i32> %id
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> [[A0:%.*]], i8* [[A1:%.*]], <4 x i32> [[IDX:%.*]], <2 x i64> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <2 x i64> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> %a0,
-  i8* %a1, <4 x i32> %idx, <2 x i64> %mask, i8 2) ;
+  ptr %a1, <4 x i32> %idx, <2 x i64> %mask, i8 2) ;
   ret <2 x i64> %res
 }
-declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, i8*,
+declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, ptr,
   <4 x i32>, <2 x i64>, i8) nounwind readonly
 
-define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, i8* %a1, <4 x i32> %idx, <4 x i64> %mask) #0 {
+define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, ptr %a1, <4 x i32> %idx, <4 x i64> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_d_q_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1852,26 +1852,26 @@ define <4 x i64> @test_x86_avx2_gather_d_q_256(<4 x i64> %a0, i8* %a1, <4 x i32>
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> [[A0:%.*]], i8* [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x i64> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x i64> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %res = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %a0,
-  i8* %a1, <4 x i32> %idx, <4 x i64> %mask, i8 2) ;
+  ptr %a1, <4 x i32> %idx, <4 x i64> %mask, i8 2) ;
   ret <4 x i64> %res
 }
-declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, i8*,
+declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, ptr,
   <4 x i32>, <4 x i64>, i8) nounwind readonly
 
-define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, i8* %a1, <2 x i64> %idx, <2 x i64> %mask) #0 {
+define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, ptr %a1, <2 x i64> %idx, <2 x i64> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_q_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1885,26 +1885,26 @@ define <2 x i64> @test_x86_avx2_gather_q_q(<2 x i64> %a0, i8* %a1, <2 x i64> %id
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> [[A0:%.*]], i8* [[A1:%.*]], <2 x i64> [[IDX:%.*]], <2 x i64> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> [[A0:%.*]], ptr [[A1:%.*]], <2 x i64> [[IDX:%.*]], <2 x i64> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0,
-  i8* %a1, <2 x i64> %idx, <2 x i64> %mask, i8 2) ;
+  ptr %a1, <2 x i64> %idx, <2 x i64> %mask, i8 2) ;
   ret <2 x i64> %res
 }
-declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, i8*,
+declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, ptr,
   <2 x i64>, <2 x i64>, i8) nounwind readonly
 
-define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, i8* %a1, <4 x i64> %idx, <4 x i64> %mask) #0 {
+define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, ptr %a1, <4 x i64> %idx, <4 x i64> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_q_q_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 72) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i64> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1918,26 +1918,26 @@ define <4 x i64> @test_x86_avx2_gather_q_q_256(<4 x i64> %a0, i8* %a1, <4 x i64>
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> [[A0:%.*]], i8* [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x i64> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> [[A0:%.*]], ptr [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x i64> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i64> [[RES]]
 ;
   %res = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %a0,
-  i8* %a1, <4 x i64> %idx, <4 x i64> %mask, i8 2) ;
+  ptr %a1, <4 x i64> %idx, <4 x i64> %mask, i8 2) ;
   ret <4 x i64> %res
 }
-declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, i8*,
+declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, ptr,
   <4 x i64>, <4 x i64>, i8) nounwind readonly
 
-define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, i8* %a1, <4 x i32> %idx, <4 x i32> %mask) #0 {
+define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, ptr %a1, <4 x i32> %idx, <4 x i32> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_d_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -1951,26 +1951,26 @@ define <4 x i32> @test_x86_avx2_gather_d_d(<4 x i32> %a0, i8* %a1, <4 x i32> %id
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> [[A0:%.*]], i8* [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x i32> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], <4 x i32> [[IDX:%.*]], <4 x i32> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %a0,
-  i8* %a1, <4 x i32> %idx, <4 x i32> %mask, i8 2) ;
+  ptr %a1, <4 x i32> %idx, <4 x i32> %mask, i8 2) ;
   ret <4 x i32> %res
 }
-declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*,
+declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, ptr,
   <4 x i32>, <4 x i32>, i8) nounwind readonly
 
-define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, i8* %a1, <8 x i32> %idx, <8 x i32> %mask) #0 {
+define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, ptr %a1, <8 x i32> %idx, <8 x i32> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_d_d_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 72) to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP5]], 0
@@ -1984,26 +1984,26 @@ define <8 x i32> @test_x86_avx2_gather_d_d_256(<8 x i32> %a0, i8* %a1, <8 x i32>
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> [[A0:%.*]], i8* [[A1:%.*]], <8 x i32> [[IDX:%.*]], <8 x i32> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> [[A0:%.*]], ptr [[A1:%.*]], <8 x i32> [[IDX:%.*]], <8 x i32> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i32> [[RES]]
 ;
   %res = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %a0,
-  i8* %a1, <8 x i32> %idx, <8 x i32> %mask, i8 2) ;
+  ptr %a1, <8 x i32> %idx, <8 x i32> %mask, i8 2) ;
   ret <8 x i32> %res
 }
-declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, i8*,
+declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, ptr,
   <8 x i32>, <8 x i32>, i8) nounwind readonly
 
-define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, i8* %a1, <2 x i64> %idx, <4 x i32> %mask) #0 {
+define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, ptr %a1, <2 x i64> %idx, <4 x i32> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_q_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -2017,26 +2017,26 @@ define <4 x i32> @test_x86_avx2_gather_q_d(<4 x i32> %a0, i8* %a1, <2 x i64> %id
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> [[A0:%.*]], i8* [[A1:%.*]], <2 x i64> [[IDX:%.*]], <4 x i32> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], <2 x i64> [[IDX:%.*]], <4 x i32> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> %a0,
-  i8* %a1, <2 x i64> %idx, <4 x i32> %mask, i8 2) ;
+  ptr %a1, <2 x i64> %idx, <4 x i32> %mask, i8 2) ;
   ret <4 x i32> %res
 }
-declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, i8*,
+declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, ptr,
   <2 x i64>, <4 x i32>, i8) nounwind readonly
 
-define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, i8* %a1, <4 x i64> %idx, <4 x i32> %mask) #0 {
+define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, ptr %a1, <4 x i64> %idx, <4 x i32> %mask) #0 {
 ; CHECK-LABEL: @test_x86_avx2_gather_q_d_256(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i64>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
@@ -2050,32 +2050,31 @@ define <4 x i32> @test_x86_avx2_gather_q_d_256(<4 x i32> %a0, i8* %a1, <4 x i64>
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> [[A0:%.*]], i8* [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x i32> [[MASK:%.*]], i8 2)
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> [[A0:%.*]], ptr [[A1:%.*]], <4 x i64> [[IDX:%.*]], <4 x i32> [[MASK:%.*]], i8 2)
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %a0,
-  i8* %a1, <4 x i64> %idx, <4 x i32> %mask, i8 2) ;
+  ptr %a1, <4 x i64> %idx, <4 x i32> %mask, i8 2) ;
   ret <4 x i32> %res
 }
-declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, i8*,
+declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, ptr,
   <4 x i64>, <4 x i32>, i8) nounwind readonly
 
-define <8 x float>  @test_gather_mask(<8 x float> %a0, float* %a, <8 x i32> %idx, <8 x float> %mask, float* nocapture %out) #0 {
+define <8 x float>  @test_gather_mask(<8 x float> %a0, ptr %a, <8 x i32> %idx, <8 x float> %mask, ptr nocapture %out) #0 {
 ; CHECK-LABEL: @test_gather_mask(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to i64*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 72) to <8 x i32>*), align 8
-; CHECK-NEXT:    [[TMP5:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 104) to i64*), align 8
-; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[A_I8:%.*]] = bitcast float* [[A:%.*]] to i8*
-; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 104) to ptr), align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP6]], 0
-; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
 ; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <8 x i32> [[TMP3]] to i256
 ; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i256 [[TMP7]], 0
@@ -2085,41 +2084,38 @@ define <8 x float>  @test_gather_mask(<8 x float> %a0, float* %a, <8 x i32> %idx
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP9:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
 ; CHECK:       9:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       10:
-; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> [[A0:%.*]], i8* [[A_I8]], <8 x i32> [[IDX:%.*]], <8 x float> [[MASK:%.*]], i8 4)
-; CHECK-NEXT:    [[OUT_PTR:%.*]] = bitcast float* [[OUT:%.*]] to <8 x float>*
+; CHECK-NEXT:    [[RES:%.*]] = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> [[A0:%.*]], ptr [[A:%.*]], <8 x i32> [[IDX:%.*]], <8 x float> [[MASK:%.*]], i8 4)
 ; CHECK-NEXT:    [[_MSCMP6:%.*]] = icmp ne i64 [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP6]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
 ; CHECK:       11:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       12:
-; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint <8 x float>* [[OUT_PTR]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[OUT:%.*]] to i64
 ; CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to <8 x i32>*
-; CHECK-NEXT:    store <8 x i32> [[TMP4]], <8 x i32>* [[TMP15]], align 4
-; CHECK-NEXT:    store <8 x float> [[MASK]], <8 x float>* [[OUT_PTR]], align 4
-; CHECK-NEXT:    store <8 x i32> zeroinitializer, <8 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
+; CHECK-NEXT:    store <8 x i32> [[TMP4]], ptr [[TMP15]], align 4
+; CHECK-NEXT:    store <8 x float> [[MASK]], ptr [[OUT]], align 4
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x float> [[RES]]
 ;
-  %a_i8 = bitcast float* %a to i8*
   %res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0,
-  i8* %a_i8, <8 x i32> %idx, <8 x float> %mask, i8 4) ;
+  ptr %a, <8 x i32> %idx, <8 x float> %mask, i8 4) ;
 
-  %out_ptr = bitcast float * %out to <8 x float> *
-  store <8 x float> %mask, <8 x float> * %out_ptr, align 4
+  store <8 x float> %mask, ptr %out, align 4
 
   ret <8 x float> %res
 }
 
-define <2 x i64> @test_mask_demanded_bits(<2 x i64> %a0, i8* %a1, <2 x i64> %idx, <2 x i1> %mask) #0 {
+define <2 x i64> @test_mask_demanded_bits(<2 x i64> %a0, ptr %a1, <2 x i64> %idx, <2 x i1> %mask) #0 {
 ; CHECK-LABEL: @test_mask_demanded_bits(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i1>, <2 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <2 x i1>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[MASK1:%.*]] = sext <2 x i1> [[MASK:%.*]] to <2 x i64>
@@ -2135,16 +2131,16 @@ define <2 x i64> @test_mask_demanded_bits(<2 x i64> %a0, i8* %a1, <2 x i64> %idx
 ; CHECK-NEXT:    [[_MSOR5:%.*]] = or i1 [[_MSOR3]], [[_MSCMP4]]
 ; CHECK-NEXT:    br i1 [[_MSOR5]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       9:
-; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> [[A0:%.*]], i8* [[A1:%.*]], <2 x i64> [[IDX:%.*]], <2 x i64> [[MASK1]], i8 2)
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> [[A0:%.*]], ptr [[A1:%.*]], <2 x i64> [[IDX:%.*]], <2 x i64> [[MASK1]], i8 2)
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %mask1 = sext <2 x i1> %mask to <2 x i64>
   %res = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0,
-  i8* %a1, <2 x i64> %idx, <2 x i64> %mask1, i8 2) ;
+  ptr %a1, <2 x i64> %idx, <2 x i64> %mask1, i8 2) ;
   ret <2 x i64> %res
 }
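All of the gather tests above convert the same way: where the typed-pointer IR had to bitcast the [100 x i64] TLS array to the shadow's vector type, the opaque-pointer form loads straight through ptr, so only the byte-offset arithmetic survives, and the zero-offset slot collapses to a direct use of @__msan_param_tls. A minimal sketch of the rewrite, using a hypothetical <4 x i32> shadow at offset 16 rather than any single test above:

  ; typed-pointer form: the TLS slot must first be cast to the shadow type
  %s = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8

  ; opaque-pointer form: the same slot, addressed as an untyped ptr
  %s = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8

The ATTR7 to ATTR6 renumbering in the CHECK lines reflects the regenerated attribute-group numbering rather than any behavioral change.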
 

diff --git a/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll b/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll
index 41d7cd8f9faea..f83a92287d247 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/byval-alignment.ll
@@ -8,13 +8,13 @@ target triple = "x86_64-unknown-linux-gnu"
 
 %struct.S = type { i64, i64, i64, [8 x i8] }
 
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 {{.*}} add {{.*}} ptrtoint {{.*}} @__msan_param_tls {{.*}} i64 8) {{.*}}, i8* align 8 {{.*}}, i64 32, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{.*}} add {{.*}} ptrtoint {{.*}} @__msan_param_tls {{.*}} i64 8) {{.*}}, ptr align 8 {{.*}}, i64 32, i1 false)
 
 define void @Caller() sanitize_memory {
 entry:
   %agg.tmp = alloca %struct.S, align 16
-  call void @Callee(i32 1, %struct.S* byval(%struct.S) align 16 %agg.tmp)
+  call void @Callee(i32 1, ptr byval(%struct.S) align 16 %agg.tmp)
   ret void
 }
 
-declare void @Callee(i32, %struct.S* byval(%struct.S) align 16)
+declare void @Callee(i32, ptr byval(%struct.S) align 16)
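
This byval-alignment check and the byval.ll tests that follow exercise the caller-to-callee shadow hand-off: for a byval argument the instrumentation copies the argument's shadow bytes to or from the parameter TLS block with a plain memcpy, so the only opaque-pointer change is the intrinsic mangling, llvm.memcpy.p0.p0.i64 on two ptr operands instead of llvm.memcpy.p0i8.p0i8.i64 on i8*. A minimal caller-side sketch, with an illustrative 32-byte argument staged at TLS offset 8 (matching the CHECK above, not a new test):

  call void @llvm.memcpy.p0.p0.i64(
      ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr),
      ptr align 8 %arg_shadow, i64 32, i1 false)

Here %arg_shadow stands for the shadow address the pass computes for the alloca being passed; the test only matches it with a wildcard.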

diff --git a/llvm/test/Instrumentation/MemorySanitizer/byval.ll b/llvm/test/Instrumentation/MemorySanitizer/byval.ll
index 372368936cfd7..258cec866d6a8 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/byval.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/byval.ll
@@ -3,181 +3,181 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-declare void @FnByVal(i128* byval(i128) %p);
-declare void @Fn(i128* %p);
+declare void @FnByVal(ptr byval(i128) %p);
+declare void @Fn(ptr %p);
 
-define i128 @ByValArgument(i32, i128* byval(i128) %p) sanitize_memory {
+define i128 @ByValArgument(i32, ptr byval(i128) %p) sanitize_memory {
 ; CHECK-LABEL: @ByValArgument(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[#]], i8* align 8 inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i8*), i64 16, i1 false)
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[#]], i8* align 4 inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i8*), i64 16, i1 false)
-; CHECK:         [[X:%.*]] = load i128, i128* %p, align 8
-; CHECK:         [[_MSLD:%.*]] = load i128, i128* %[[#]], align 8
-; CHECK:         %[[#]] = load i32, i32* %[[#]], align 8
-; CHECK:         store i128 [[_MSLD]], i128* bitcast ([100 x i64]* @__msan_retval_tls to i128*), align 8
-; CHECK:         store i32 %[[#]], i32* @__msan_retval_origin_tls, align 4
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 8 %[[#]], ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 16, i1 false)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 16, i1 false)
+; CHECK:         [[X:%.*]] = load i128, ptr %p, align 8
+; CHECK:         [[_MSLD:%.*]] = load i128, ptr %[[#]], align 8
+; CHECK:         %[[#]] = load i32, ptr %[[#]], align 8
+; CHECK:         store i128 [[_MSLD]], ptr @__msan_retval_tls, align 8
+; CHECK:         store i32 %[[#]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK:         ret i128 [[X]]
 ;
 entry:
-  %x = load i128, i128* %p
+  %x = load i128, ptr %p
   ret i128 %x
 }
 
-define i128 @ByValArgumentNoSanitize(i32, i128* byval(i128) %p) {
+define i128 @ByValArgumentNoSanitize(i32, ptr byval(i128) %p) {
 ; CHECK-LABEL: @ByValArgumentNoSanitize(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memset.p0i8.i64(i8* align 8 %[[#]], i8 0, i64 16, i1 false)
-; CHECK:         [[X:%.*]] = load i128, i128* %p, align 8
-; CHECK:         store i128 0, i128* bitcast ([100 x i64]* @__msan_retval_tls to i128*), align 8
-; CHECK:         store i32 0, i32* @__msan_retval_origin_tls, align 4
+; CHECK:         call void @llvm.memset.p0.i64(ptr align 8 %[[#]], i8 0, i64 16, i1 false)
+; CHECK:         [[X:%.*]] = load i128, ptr %p, align 8
+; CHECK:         store i128 0, ptr @__msan_retval_tls, align 8
+; CHECK:         store i32 0, ptr @__msan_retval_origin_tls, align 4
 ; CHECK:         ret i128 [[X]]
 ;
 entry:
-  %x = load i128, i128* %p
+  %x = load i128, ptr %p
   ret i128 %x
 }
 
-define void @ByValForward(i32, i128* byval(i128) %p) sanitize_memory {
+define void @ByValForward(i32, ptr byval(i128) %p) sanitize_memory {
 ; CHECK-LABEL: @ByValForward(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[#]], i8* align 8 inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i8*), i64 16, i1 false)
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[#]], i8* align 4 inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i8*), i64 16, i1 false)
-; CHECK:         store i64 0, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK:         call void @Fn(i128* %p)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 8 %[[#]], ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 16, i1 false)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 16, i1 false)
+; CHECK:         store i64 0, ptr @__msan_param_tls, align 8
+; CHECK:         call void @Fn(ptr %p)
 ; CHECK:         ret void
 ;
 entry:
-  call void @Fn(i128* %p)
+  call void @Fn(ptr %p)
   ret void
 }
 
-define void @ByValForwardNoSanitize(i32, i128* byval(i128) %p) {
+define void @ByValForwardNoSanitize(i32, ptr byval(i128) %p) {
 ; CHECK-LABEL: @ByValForwardNoSanitize(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memset.p0i8.i64(i8* align 8 %[[#]], i8 0, i64 16, i1 false)
-; CHECK:         store i64 0, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK:         call void @Fn(i128* %p)
+; CHECK:         call void @llvm.memset.p0.i64(ptr align 8 %[[#]], i8 0, i64 16, i1 false)
+; CHECK:         store i64 0, ptr @__msan_param_tls, align 8
+; CHECK:         call void @Fn(ptr %p)
 ; CHECK:         ret void
 ;
 entry:
-  call void @Fn(i128* %p)
+  call void @Fn(ptr %p)
   ret void
 }
 
-define void @ByValForwardByVal(i32, i128* byval(i128) %p) sanitize_memory {
+define void @ByValForwardByVal(i32, ptr byval(i128) %p) sanitize_memory {
 ; CHECK-LABEL: @ByValForwardByVal(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[#]], i8* align 8 inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i8*), i64 16, i1 false)
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[#]], i8* align 4 inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i8*), i64 16, i1 false)
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast ([100 x i64]* @__msan_param_tls to i8*), i8* %[[#]], i64 16, i1 false)
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 bitcast ([200 x i32]* @__msan_param_origin_tls to i8*), i8* align 4 %[[#]], i64 16, i1 false)
-; CHECK:         call void @FnByVal(i128* byval(i128) %p)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 8 %[[#]], ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 16, i1 false)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 16, i1 false)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr @__msan_param_tls, ptr %[[#]], i64 16, i1 false)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 4 @__msan_param_origin_tls, ptr align 4 %[[#]], i64 16, i1 false)
+; CHECK:         call void @FnByVal(ptr byval(i128) %p)
 ; CHECK:         ret void
 ;
 entry:
-  call void @FnByVal(i128* byval(i128) %p)
+  call void @FnByVal(ptr byval(i128) %p)
   ret void
 }
 
-define void @ByValForwardByValNoSanitize(i32, i128* byval(i128) %p) {
+define void @ByValForwardByValNoSanitize(i32, ptr byval(i128) %p) {
 ; CHECK-LABEL: @ByValForwardByValNoSanitize(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memset.p0i8.i64(i8* align 8 %[[#]], i8 0, i64 16, i1 false)
-; CHECK:         call void @llvm.memset.p0i8.i64(i8* bitcast ([100 x i64]* @__msan_param_tls to i8*), i8 0, i64 16, i1 false)
-; CHECK:         call void @FnByVal(i128* byval(i128) %p)
+; CHECK:         call void @llvm.memset.p0.i64(ptr align 8 %[[#]], i8 0, i64 16, i1 false)
+; CHECK:         call void @llvm.memset.p0.i64(ptr @__msan_param_tls, i8 0, i64 16, i1 false)
+; CHECK:         call void @FnByVal(ptr byval(i128) %p)
 ; CHECK:         ret void
 ;
 entry:
-  call void @FnByVal(i128* byval(i128) %p)
+  call void @FnByVal(ptr byval(i128) %p)
   ret void
 }
 
-declare void @FnByVal8(i8* byval(i8) %p);
-declare void @Fn8(i8* %p);
+declare void @FnByVal8(ptr byval(i8) %p);
+declare void @Fn8(ptr %p);
 
-define i8 @ByValArgument8(i32, i8* byval(i8) %p) sanitize_memory {
+define i8 @ByValArgument8(i32, ptr byval(i8) %p) sanitize_memory {
 ; CHECK-LABEL: @ByValArgument8(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %[[#]], i8* align 1 inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i8*), i64 1, i1 false)
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[#]], i8* align 4 inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i8*), i64 4, i1 false)
-; CHECK:         [[X:%.*]] = load i8, i8* %p, align 1
-; CHECK:         [[_MSLD:%.*]] = load i8, i8* %[[#]], align 1
-; CHECK:         %[[#]] = load i32, i32* %[[#]], align 4
-; CHECK:         store i8 [[_MSLD]], i8* bitcast ([100 x i64]* @__msan_retval_tls to i8*), align 8
-; CHECK:         store i32 %[[#]], i32* @__msan_retval_origin_tls, align 4
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 1 %[[#]], ptr align 1 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 1, i1 false)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 4, i1 false)
+; CHECK:         [[X:%.*]] = load i8, ptr %p, align 1
+; CHECK:         [[_MSLD:%.*]] = load i8, ptr %[[#]], align 1
+; CHECK:         %[[#]] = load i32, ptr %[[#]], align 4
+; CHECK:         store i8 [[_MSLD]], ptr @__msan_retval_tls, align 8
+; CHECK:         store i32 %[[#]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK:         ret i8 [[X]]
 ;
 entry:
-  %x = load i8, i8* %p
+  %x = load i8, ptr %p
   ret i8 %x
 }
 
-define i8 @ByValArgumentNoSanitize8(i32, i8* byval(i8) %p) {
+define i8 @ByValArgumentNoSanitize8(i32, ptr byval(i8) %p) {
 ; CHECK-LABEL: @ByValArgumentNoSanitize8(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memset.p0i8.i64(i8* align 1 %[[#]], i8 0, i64 1, i1 false)
-; CHECK:         [[X:%.*]] = load i8, i8* %p, align 1
-; CHECK:         store i8 0, i8* bitcast ([100 x i64]* @__msan_retval_tls to i8*), align 8
-; CHECK:         store i32 0, i32* @__msan_retval_origin_tls, align 4
+; CHECK:         call void @llvm.memset.p0.i64(ptr align 1 %[[#]], i8 0, i64 1, i1 false)
+; CHECK:         [[X:%.*]] = load i8, ptr %p, align 1
+; CHECK:         store i8 0, ptr @__msan_retval_tls, align 8
+; CHECK:         store i32 0, ptr @__msan_retval_origin_tls, align 4
 ; CHECK:         ret i8 [[X]]
 ;
 entry:
-  %x = load i8, i8* %p
+  %x = load i8, ptr %p
   ret i8 %x
 }
 
-define void @ByValForward8(i32, i8* byval(i8) %p) sanitize_memory {
+define void @ByValForward8(i32, ptr byval(i8) %p) sanitize_memory {
 ; CHECK-LABEL: @ByValForward8(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %[[#]], i8* align 1 inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i8*), i64 1, i1 false)
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[#]], i8* align 4 inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i8*), i64 4, i1 false)
-; CHECK:         store i64 0, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK:         call void @Fn8(i8* %p)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 1 %[[#]], ptr align 1 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 1, i1 false)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 4, i1 false)
+; CHECK:         store i64 0, ptr @__msan_param_tls, align 8
+; CHECK:         call void @Fn8(ptr %p)
 ; CHECK:         ret void
 ;
 entry:
-  call void @Fn8(i8* %p)
+  call void @Fn8(ptr %p)
   ret void
 }
 
-define void @ByValForwardNoSanitize8(i32, i8* byval(i8) %p) {
+define void @ByValForwardNoSanitize8(i32, ptr byval(i8) %p) {
 ; CHECK-LABEL: @ByValForwardNoSanitize8(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memset.p0i8.i64(i8* align 1 %[[#]], i8 0, i64 1, i1 false)
-; CHECK:         store i64 0, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK:         call void @Fn8(i8* %p)
+; CHECK:         call void @llvm.memset.p0.i64(ptr align 1 %[[#]], i8 0, i64 1, i1 false)
+; CHECK:         store i64 0, ptr @__msan_param_tls, align 8
+; CHECK:         call void @Fn8(ptr %p)
 ; CHECK:         ret void
 ;
 entry:
-  call void @Fn8(i8* %p)
+  call void @Fn8(ptr %p)
   ret void
 }
 
-define void @ByValForwardByVal8(i32, i8* byval(i8) %p) sanitize_memory {
+define void @ByValForwardByVal8(i32, ptr byval(i8) %p) sanitize_memory {
 ; CHECK-LABEL: @ByValForwardByVal8(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %[[#]], i8* align 1 inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i8*), i64 1, i1 false)
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[#]], i8* align 4 inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i8*), i64 4, i1 false)
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast ([100 x i64]* @__msan_param_tls to i8*), i8* %[[#]], i64 1, i1 false)
-; CHECK:         call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 bitcast ([200 x i32]* @__msan_param_origin_tls to i8*), i8* align 4 %[[#]], i64 4, i1 false)
-; CHECK:         call void @FnByVal8(i8* byval(i8) %p)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 1 %[[#]], ptr align 1 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), i64 1, i1 false)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[#]], ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), i64 4, i1 false)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr @__msan_param_tls, ptr %[[#]], i64 1, i1 false)
+; CHECK:         call void @llvm.memcpy.p0.p0.i64(ptr align 4 @__msan_param_origin_tls, ptr align 4 %[[#]], i64 4, i1 false)
+; CHECK:         call void @FnByVal8(ptr byval(i8) %p)
 ; CHECK:         ret void
 ;
 entry:
-  call void @FnByVal8(i8* byval(i8) %p)
+  call void @FnByVal8(ptr byval(i8) %p)
   ret void
 }
 
-define void @ByValForwardByValNoSanitize8(i32, i8* byval(i8) %p) {
+define void @ByValForwardByValNoSanitize8(i32, ptr byval(i8) %p) {
 ; CHECK-LABEL: @ByValForwardByValNoSanitize8(
 ; CHECK-NEXT:  entry:
-; CHECK:         call void @llvm.memset.p0i8.i64(i8* align 1 %[[#]], i8 0, i64 1, i1 false)
-; CHECK:         call void @llvm.memset.p0i8.i64(i8* bitcast ([100 x i64]* @__msan_param_tls to i8*), i8 0, i64 1, i1 false)
-; CHECK:         call void @FnByVal8(i8* byval(i8) %p)
+; CHECK:         call void @llvm.memset.p0.i64(ptr align 1 %[[#]], i8 0, i64 1, i1 false)
+; CHECK:         call void @llvm.memset.p0.i64(ptr @__msan_param_tls, i8 0, i64 1, i1 false)
+; CHECK:         call void @FnByVal8(ptr byval(i8) %p)
 ; CHECK:         ret void
 ;
 entry:
-  call void @FnByVal8(i8* byval(i8) %p)
+  call void @FnByVal8(ptr byval(i8) %p)
   ret void
 }
 

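A note on the recurring pattern in the updated CHECK lines above: with typed
pointers, reusing the i64-typed TLS arrays required a bitcast or an offset-0
getelementptr constant expression, and those wrappers fold away once every
pointer is plain 'ptr'. A minimal before/after sketch (%s stands in for
whatever shadow value is being spilled):

  ; typed pointers: a constant expression wraps the TLS global
  store i64 %s, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
  ; opaque pointers: the global already has type 'ptr', so the wrapper folds away
  store i64 %s, ptr @__msan_param_tls, align 8

Accesses at a nonzero offset keep their inttoptr(add(ptrtoint ...)) form,
which is why only the offset-0 CHECK lines got shorter.
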
diff  --git a/llvm/test/Instrumentation/MemorySanitizer/check-array.ll b/llvm/test/Instrumentation/MemorySanitizer/check-array.ll
index 0e30e651234ef..a4e5e37eaaf86 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/check-array.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/check-array.ll
@@ -4,10 +4,10 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define noundef [2 x i24] @check_array([2 x i24]* %p) sanitize_memory {
-; CHECK: @check_array([2 x i24]* [[P:%.*]])
-; CHECK: [[O:%.*]] = load [2 x i24], [2 x i24]* [[P]]
-  %o = load [2 x i24], [2 x i24]* %p
+define noundef [2 x i24] @check_array(ptr %p) sanitize_memory {
+; CHECK: @check_array(ptr [[P:%.*]])
+; CHECK: [[O:%.*]] = load [2 x i24], ptr [[P]]
+  %o = load [2 x i24], ptr %p
 ; CHECK: [[FIELD0:%.+]] = extractvalue [2 x i24] %_msld, 0
 ; CHECK: [[FIELD1:%.+]] = extractvalue [2 x i24] %_msld, 1
 ; CHECK: [[F1_OR:%.+]] = or i24 [[FIELD0]], [[FIELD1]]

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/check-constant-shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/check-constant-shadow.ll
index cd8178cd7c5e2..2e89bbd2dcff2 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/check-constant-shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/check-constant-shadow.ll
@@ -13,16 +13,16 @@ entry:
 }
 
 ; CHECK-LABEL: @main
-; CHECK: store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*)
+; CHECK: store i32 0, ptr @__msan_retval_tls
 ; CONST: call void @__msan_warning_with_origin_noreturn
 ; CHECK: ret i32 undef
 
 
 ; This function stores known initialized value.
 ; Expect 2 stores: one for the shadow (0), one for the value (42), but no origin.
-define void @StoreConstant(i32* nocapture %p) nounwind uwtable sanitize_memory {
+define void @StoreConstant(ptr nocapture %p) nounwind uwtable sanitize_memory {
 entry:
-  store i32 42, i32* %p, align 4
+  store i32 42, ptr %p, align 4
   ret void
 }
 
@@ -35,9 +35,9 @@ entry:
 ; This function stores known uninitialized value.
 ; Expect 3 stores: shadow, value and origin.
 ; Expect no icmp(s): everything here is unconditional.
-define void @StoreUndef(i32* nocapture %p) nounwind uwtable sanitize_memory {
+define void @StoreUndef(ptr nocapture %p) nounwind uwtable sanitize_memory {
 entry:
-  store i32 undef, i32* %p, align 4
+  store i32 undef, ptr %p, align 4
   ret void
 }
 
@@ -59,8 +59,8 @@ entry:
 }
 
 ; CHECK-LABEL: @MaybeUninitialized
-; CHECK: store i32 extractelement (<4 x i32> bitcast (<2 x i64> <i64 0, i64 undef> to <4 x i32>), i64 0), i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK: store i32 0, i32* @__msan_retval_origin_tls
+; CHECK: store i32 extractelement (<4 x i32> bitcast (<2 x i64> <i64 0, i64 undef> to <4 x i32>), i64 0), ptr @__msan_retval_tls, align 8
+; CHECK: store i32 0, ptr @__msan_retval_origin_tls
 
 ; This function stores known initialized value, but msan can't prove this.
 define noundef i32 @MaybeUninitializedRetNoUndef(<2 x i64> noundef %acc) nounwind uwtable sanitize_memory {

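The StoreConstant/StoreUndef expectations above follow from how shadows are
computed for constants: a defined constant has shadow 0, while undef gets the
all-ones shadow. A rough sketch of the emitted stores in origins mode, where
%sp and %op stand in for the computed shadow and origin addresses:

  ; StoreConstant: shadow is provably 0, so no origin store
  store i32 0, ptr %sp, align 4
  store i32 42, ptr %p, align 4

  ; StoreUndef: shadow is the constant -1, so an origin is recorded as well
  store i32 -1, ptr %sp, align 4
  store i32 %origin, ptr %op, align 4
  store i32 undef, ptr %p, align 4

Because both shadows are compile-time constants, neither case needs an
icmp-guarded warning branch.
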
diff  --git a/llvm/test/Instrumentation/MemorySanitizer/check-struct.ll b/llvm/test/Instrumentation/MemorySanitizer/check-struct.ll
index bc9d699a785d9..d24ffe3296195 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/check-struct.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/check-struct.ll
@@ -6,10 +6,10 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK-LABEL: @main
 define { i32, i8 } @main() sanitize_memory {
-; CHECK: [[P:%.*]] = inttoptr i64 0 to { i32, i8 }*
-  %p = inttoptr i64 0 to { i32, i8 } *
-; CHECK: [[O:%.*]] = load { i32, i8 }, { i32, i8 }* [[P]]
-  %o = load { i32, i8 }, { i32, i8 } *%p
+; CHECK: [[P:%.*]] = inttoptr i64 0 to ptr
+  %p = inttoptr i64 0 to ptr
+; CHECK: [[O:%.*]] = load { i32, i8 }, ptr [[P]]
+  %o = load { i32, i8 }, ptr %p
 ; CHECK: [[FIELD0:%.+]] = extractvalue { i32, i8 } %_msld, 0
 ; CHECK: [[F0_POISONED:%.+]] = icmp ne i32 [[FIELD0]]
 ; CHECK: [[FIELD1:%.+]] = extractvalue { i32, i8 } %_msld, 1

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/check_access_address.ll b/llvm/test/Instrumentation/MemorySanitizer/check_access_address.ll
index 8a9846fe7b130..8800aa4bd9138 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/check_access_address.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/check_access_address.ll
@@ -6,33 +6,33 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; Test byval argument shadow alignment
 
-define <2 x i64> @ByValArgumentShadowLargeAlignment(<2 x i64>* byval(<2 x i64>) %p) sanitize_memory {
+define <2 x i64> @ByValArgumentShadowLargeAlignment(ptr byval(<2 x i64>) %p) sanitize_memory {
 entry:
-  %x = load <2 x i64>, <2 x i64>* %p
+  %x = load <2 x i64>, ptr %p
   ret <2 x i64> %x
 }
 
 ; CHECK-LABEL: @ByValArgumentShadowLargeAlignment
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 {{.*}}, i8* align 8 {{.*}}, i64 16, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{.*}}, ptr align 8 {{.*}}, i64 16, i1 false)
 ; CHECK: ret <2 x i64>
 
 
-define i16 @ByValArgumentShadowSmallAlignment(i16* byval(i16) %p) sanitize_memory {
+define i16 @ByValArgumentShadowSmallAlignment(ptr byval(i16) %p) sanitize_memory {
 entry:
-  %x = load i16, i16* %p
+  %x = load i16, ptr %p
   ret i16 %x
 }
 
 ; CHECK-LABEL: @ByValArgumentShadowSmallAlignment
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 {{.*}}, i8* align 2 {{.*}}, i64 2, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 2 {{.*}}, ptr align 2 {{.*}}, i64 2, i1 false)
 ; CHECK: ret i16
 
 
 ; Check instrumentation of stores. The check must precede the shadow store.
 
-define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
+define void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
 entry:
-  store i32 %x, i32* %p, align 4
+  store i32 %x, ptr %p, align 4
   ret void
 }
 

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/clmul.ll b/llvm/test/Instrumentation/MemorySanitizer/clmul.ll
index d9d187fe55f84..ae4f2d2c868a0 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/clmul.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/clmul.ll
@@ -67,4 +67,4 @@ entry:
 ; ORIGIN: %[[I:.*]] = icmp ne i512 %[[FLAT]], 0
 ; ORIGIN: %[[O:.*]] = select i1 %[[I]],
 ; CHECK: store <8 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
-; ORIGIN: store i32 %[[O]], i32* @__msan_retval_origin_tls
+; ORIGIN: store i32 %[[O]], ptr @__msan_retval_origin_tls

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/csr.ll b/llvm/test/Instrumentation/MemorySanitizer/csr.ll
index 160ebbbfdff1c..e0d6f991d95af 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/csr.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/csr.ll
@@ -6,23 +6,22 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-declare void @llvm.x86.sse.stmxcsr(i8*)
-declare void @llvm.x86.sse.ldmxcsr(i8*)
+declare void @llvm.x86.sse.stmxcsr(ptr)
+declare void @llvm.x86.sse.ldmxcsr(ptr)
 
-define void @getcsr(i32 *%p) sanitize_memory {
+define void @getcsr(ptr %p) sanitize_memory {
 entry:
-  %0 = bitcast i32* %p to i8*
-  call void @llvm.x86.sse.stmxcsr(i8* %0)
+  call void @llvm.x86.sse.stmxcsr(ptr %p)
   ret void
 }
 
 ; CHECK-LABEL: @getcsr(
-; CHECK: store i32 0, i32*
+; CHECK: store i32 0, ptr
 ; CHECK: call void @llvm.x86.sse.stmxcsr(
 ; CHECK: ret void
 
 ; ADDR-LABEL: @getcsr(
-; ADDR: %[[A:.*]] = load i64, i64* getelementptr inbounds {{.*}} @__msan_param_tls, i32 0, i32 0), align 8
+; ADDR: %[[A:.*]] = load i64, ptr @__msan_param_tls, align 8
 ; ADDR: %[[B:.*]] = icmp ne i64 %[[A]], 0
 ; ADDR: br i1 %[[B]], label {{.*}}, label
 ; ADDR: call void @__msan_warning_noreturn()
@@ -30,15 +29,14 @@ entry:
 ; ADDR: ret void
 
 ; Function Attrs: nounwind uwtable
-define void @setcsr(i32 *%p) sanitize_memory {
+define void @setcsr(ptr %p) sanitize_memory {
 entry:
-  %0 = bitcast i32* %p to i8*
-  call void @llvm.x86.sse.ldmxcsr(i8* %0)
+  call void @llvm.x86.sse.ldmxcsr(ptr %p)
   ret void
 }
 
 ; CHECK-LABEL: @setcsr(
-; CHECK: %[[A:.*]] = load i32, i32* %{{.*}}, align 1
+; CHECK: %[[A:.*]] = load i32, ptr %{{.*}}, align 1
 ; CHECK: %[[B:.*]] = icmp ne i32 %[[A]], 0
 ; CHECK: br i1 %[[B]], label {{.*}}, label
 ; CHECK: call void @__msan_warning_noreturn()
@@ -46,8 +44,8 @@ entry:
 ; CHECK: ret void
 
 ; ADDR-LABEL: @setcsr(
-; ADDR: %[[A:.*]] = load i64, i64* getelementptr inbounds {{.*}} @__msan_param_tls, i32 0, i32 0), align 8
-; ADDR: %[[C:.*]] = load i32, i32*
+; ADDR: %[[A:.*]] = load i64, ptr @__msan_param_tls, align 8
+; ADDR: %[[C:.*]] = load i32, ptr
 ; ADDR: %[[B:.*]] = icmp ne i64 %[[A]], 0
 ; ADDR: %[[D:.*]] = icmp ne i32 %[[C]], 0
 ; ADDR: %[[E:.*]] = or i1 %[[B]], %[[D]]

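The asymmetry between @getcsr and @setcsr above comes from the direction of
the memory access: stmxcsr writes 4 bytes of application memory, so the pass
only clears the destination's shadow, while ldmxcsr reads 4 bytes, so the
loaded shadow must be checked before the call. A sketch of the ldmxcsr-side
check (%sp stands in for the computed shadow address):

  %s = load i32, ptr %sp, align 1
  %poisoned = icmp ne i32 %s, 0
  br i1 %poisoned, label %warn, label %ok
  ; %warn calls @__msan_warning_noreturn() and ends in unreachable

In the ADDR configuration (address checks enabled), the pointer argument's
own shadow, loaded from @__msan_param_tls, is OR-ed into the same check; that
is the extra icmp/or pair in the @setcsr ADDR expectations.
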
diff  --git a/llvm/test/Instrumentation/MemorySanitizer/disambiguate-origin.ll b/llvm/test/Instrumentation/MemorySanitizer/disambiguate-origin.ll
index a9af4b382de2d..8feca5829b3ef 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/disambiguate-origin.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/disambiguate-origin.ll
@@ -20,18 +20,18 @@ target triple = "x86_64-unknown-linux-gnu"
 declare void @OneArg(i32 noundef %a) nounwind uwtable sanitize_memory;
 declare void @ManyArgs(i32 noundef %a, i32 noundef %b, i32 noundef %c) nounwind uwtable sanitize_memory;
 
-define void @TestOne(i32* noundef %a)  nounwind uwtable sanitize_memory {
+define void @TestOne(ptr noundef %a)  nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @TestOne(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1:![0-9]+]]
-; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[A:%.*]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i32* [[A]] to i64, !dbg [[DBG1]]
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[A:%.*]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to i32*, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to i32*, !dbg [[DBG1]]
-; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr, !dbg [[DBG1]]
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP2]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0, !dbg [[DBG7:![0-9]+]]
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP6:%.*]], label [[TMP7:%.*]], !dbg [[DBG7]], !prof [[PROF8:![0-9]+]]
 ; CHECK:       6:
@@ -42,39 +42,39 @@ define void @TestOne(i32* noundef %a)  nounwind uwtable sanitize_memory {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %v = load i32, i32* %a, !dbg !11
+  %v = load i32, ptr %a, !dbg !11
   call void @OneArg(i32 noundef %v), !dbg !10
   ret void
 }
 
-define void @TestMany(i32* noundef %a)  nounwind uwtable sanitize_memory {
+define void @TestMany(ptr noundef %a)  nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @TestMany(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    [[X:%.*]] = load i32, i32* [[A:%.*]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i32* [[A]] to i64, !dbg [[DBG1]]
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr [[A:%.*]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to i32*, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to i32*, !dbg [[DBG1]]
-; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, i32* [[TMP2]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[Y:%.*]] = load i32, i32* [[A]], align 4, !dbg [[DBG9:![0-9]+]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint i32* [[A]] to i64, !dbg [[DBG9]]
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr, !dbg [[DBG1]]
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP2]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[Y:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG9:![0-9]+]]
+; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[A]] to i64, !dbg [[DBG9]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080, !dbg [[DBG9]]
-; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to i32*, !dbg [[DBG9]]
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr, !dbg [[DBG9]]
 ; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[TMP7]], 17592186044416, !dbg [[DBG9]]
-; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to i32*, !dbg [[DBG9]]
-; CHECK-NEXT:    [[_MSLD1:%.*]] = load i32, i32* [[TMP8]], align 4, !dbg [[DBG9]]
-; CHECK-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4, !dbg [[DBG9]]
-; CHECK-NEXT:    [[Z:%.*]] = load i32, i32* [[A]], align 4, !dbg [[DBG10:![0-9]+]]
-; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint i32* [[A]] to i64, !dbg [[DBG10]]
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr, !dbg [[DBG9]]
+; CHECK-NEXT:    [[_MSLD1:%.*]] = load i32, ptr [[TMP8]], align 4, !dbg [[DBG9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !dbg [[DBG9]]
+; CHECK-NEXT:    [[Z:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG10:![0-9]+]]
+; CHECK-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[A]] to i64, !dbg [[DBG10]]
 ; CHECK-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080, !dbg [[DBG10]]
-; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to i32*, !dbg [[DBG10]]
+; CHECK-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr, !dbg [[DBG10]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 17592186044416, !dbg [[DBG10]]
-; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to i32*, !dbg [[DBG10]]
-; CHECK-NEXT:    [[_MSLD2:%.*]] = load i32, i32* [[TMP14]], align 4, !dbg [[DBG10]]
-; CHECK-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4, !dbg [[DBG10]]
+; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr, !dbg [[DBG10]]
+; CHECK-NEXT:    [[_MSLD2:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg [[DBG10]]
+; CHECK-NEXT:    [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4, !dbg [[DBG10]]
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0, !dbg [[DBG7]]
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP18:%.*]], label [[TMP20:%.*]], !dbg [[DBG7]], !prof [[PROF8]]
 ; CHECK:       18:
@@ -100,9 +100,9 @@ define void @TestMany(i32* noundef %a)  nounwind uwtable sanitize_memory {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %x = load i32, i32* %a, !dbg !11
-  %y = load i32, i32* %a, !dbg !12
-  %z = load i32, i32* %a, !dbg !13
+  %x = load i32, ptr %a, !dbg !11
+  %y = load i32, ptr %a, !dbg !12
+  %z = load i32, ptr %a, !dbg !13
   call void @ManyArgs(i32 noundef %x, i32 noundef %y, i32 noundef %z), !dbg !10
   ret void
 }

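The magic numbers in these shadow computations read better in hex:
87960930222080 is 0x500000000000 and 17592186044416 is 0x100000000000 (2^44).
Under the default x86_64 Linux mapping, the sequences above amount to:

  ; shadow address: application address XOR the shadow mask
  %sa  = xor i64 %addr, 87960930222080   ; addr ^ 0x500000000000
  ; origin address: shadow address plus the origin offset
  %oa0 = add i64 %sa, 17592186044416     ; + 0x100000000000
  %oa  = and i64 %oa0, -4                ; align down to the 4-byte origin cell

The trailing 'and -4' appears only when the access is not already known to be
4-byte aligned (compare the align 4 loads above, which skip it, with the
align 1 masked operations in masked-store-load.ll below).
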
diff  --git a/llvm/test/Instrumentation/MemorySanitizer/freeze.ll b/llvm/test/Instrumentation/MemorySanitizer/freeze.ll
index 787b109c6cd8d..8a14a63f93531 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/freeze.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/freeze.ll
@@ -3,18 +3,18 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define i32 @nofreeze(i32* %ptr) sanitize_memory {
+define i32 @nofreeze(ptr %ptr) sanitize_memory {
     ; CHECK-LABEL: @nofreeze
-    %val = load i32, i32* %ptr
+    %val = load i32, ptr %ptr
     ; CHECK: [[SHADOW_PTR:%.*]] = inttoptr
-    ; CHECK: [[SHADOW:%.*]] = load i32, i32* [[SHADOW_PTR]]
+    ; CHECK: [[SHADOW:%.*]] = load i32, ptr [[SHADOW_PTR]]
     ; CHECK: store i32 [[SHADOW]], {{.*}} @__msan_retval_tls
     ret i32 %val
 }
 
-define i32 @freeze_inst(i32* %ptr) sanitize_memory {
+define i32 @freeze_inst(ptr %ptr) sanitize_memory {
     ; CHECK-LABEL: @freeze_inst
-    %val = load i32, i32* %ptr
+    %val = load i32, ptr %ptr
     %freeze_val = freeze i32 %val
     ; CHECK-NOT: __msan_warning
     ; CHECK: store i32 0, {{.*}} @__msan_retval_tls

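The freeze.ll expectations encode the shadow semantics of freeze: whatever
bits %val holds, freeze pins the result to some fixed value, so the result is
never uninitialized and its shadow is the constant 0:

  %val = load i32, ptr %ptr   ; shadow of %val comes from shadow memory
  %f   = freeze i32 %val      ; shadow of %f is 0, regardless of %val's shadow

That is why @freeze_inst stores a literal 0 to @__msan_retval_tls and must not
reach __msan_warning, while @nofreeze forwards the loaded shadow.
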
diff  --git a/llvm/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll b/llvm/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll
index ac1e1d91a295f..d7156540e003f 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll
@@ -17,9 +17,9 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
+define void @LoadAndCmp(ptr nocapture %a) nounwind uwtable sanitize_memory {
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %tobool = icmp eq i32 %0, 0
   br i1 %tobool, label %if.end, label %if.then
 
@@ -42,9 +42,9 @@ declare void @foo(...)
 ; CHECK: ret void
 
 
-define void @Store(i64* nocapture %p, i64 %x) nounwind uwtable sanitize_memory {
+define void @Store(ptr nocapture %p, i64 %x) nounwind uwtable sanitize_memory {
 entry:
-  store i64 %x, i64* %p, align 4
+  store i64 %x, ptr %p, align 4
   ret void
 }
 
@@ -53,9 +53,7 @@ entry:
 ; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
 ; CHECK: store
 ; CHECK-ORIGINS-NOT: __msan_chain_origin
-; CHECK-ORIGINS: bitcast i64* {{.*}} to i8*
-; CHECK-ORIGINS-NOT: __msan_chain_origin
-; CHECK-ORIGINS: call void @__msan_maybe_store_origin_8(i64 zeroext {{.*}}, i8* {{.*}}, i32 zeroext {{.*}})
+; CHECK-ORIGINS: call void @__msan_maybe_store_origin_8(i64 zeroext {{.*}}, ptr {{.*}}, i32 zeroext {{.*}})
 ; CHECK-ORIGINS-NOT: __msan_chain_origin
 ; CHECK: store i64
 ; CHECK: ret void

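The CHECK-ORIGINS deletion above is the fixup pattern in its purest form: the
old lines matched the bitcast feeding @__msan_maybe_store_origin_8, and under
opaque pointers that cast is never emitted, so the stale line is dropped
rather than mechanically converted. Roughly (%s and %o stand in for the
shadow and origin values):

  ; typed pointers: the helper needed an i8* operand
  %0 = bitcast i64* %p to i8*
  call void @__msan_maybe_store_origin_8(i64 zeroext %s, i8* %0, i32 zeroext %o)

  ; opaque pointers: %p is passed directly
  call void @__msan_maybe_store_origin_8(i64 zeroext %s, ptr %p, i32 zeroext %o)
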
diff  --git a/llvm/test/Instrumentation/MemorySanitizer/libatomic.ll b/llvm/test/Instrumentation/MemorySanitizer/libatomic.ll
index dcbd00802cd0b..742c45447ebc2 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/libatomic.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/libatomic.ll
@@ -3,66 +3,58 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-declare void @__atomic_load(i64, i8*, i8*, i32)
-declare void @__atomic_store(i64, i8*, i8*, i32)
+declare void @__atomic_load(i64, ptr, ptr, i32)
+declare void @__atomic_store(i64, ptr, ptr, i32)
 
-define i24 @odd_sized_load(i24* %ptr) sanitize_memory {
-; CHECK: @odd_sized_load(i24* {{.*}}[[PTR:%.+]])
+define i24 @odd_sized_load(ptr %ptr) sanitize_memory {
+; CHECK: @odd_sized_load(ptr {{.*}}[[PTR:%.+]])
 ; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
 ; CHECK-ORIGIN: @__msan_set_alloca_origin
-; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
-; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
-; CHECK: call void @__atomic_load(i64 3, i8* [[PTR_I8]], i8* [[VAL_PTR_I8]], i32 2)
+; CHECK: call void @__atomic_load(i64 3, ptr [[PTR]], ptr [[VAL_PTR]], i32 2)
 
-; CHECK: ptrtoint i8* [[PTR_I8]]
+; CHECK: ptrtoint ptr [[PTR]]
 ; CHECK: xor
 ; CHECK: [[SPTR_I8:%.*]] = inttoptr
 ; CHECK-ORIGIN: add
 ; CHECK-ORIGIN: and
 ; CHECK-ORIGIN: [[OPTR:%.*]] = inttoptr
 
-; CHECK: ptrtoint i8* [[VAL_PTR_I8]]
+; CHECK: ptrtoint ptr [[VAL_PTR]]
 ; CHECK: xor
 ; CHECK: [[VAL_SPTR_I8:%.*]] = inttoptr
 ; CHECK-ORIGIN: add
 ; CHECK-ORIGIN: and
 ; CHECK-ORIGIN: [[VAL_OPTR:%.*]] = inttoptr
 
-; CHECK: call void @llvm.memcpy{{.*}}(i8* align 1 [[VAL_SPTR_I8]], i8* align 1 [[SPTR_I8]], i64 3
+; CHECK: call void @llvm.memcpy{{.*}}(ptr align 1 [[VAL_SPTR_I8]], ptr align 1 [[SPTR_I8]], i64 3
 
-; CHECK-ORIGIN: [[ARG_ORIGIN:%.*]] = load i32, i32* [[OPTR]]
+; CHECK-ORIGIN: [[ARG_ORIGIN:%.*]] = load i32, ptr [[OPTR]]
 ; CHECK-ORIGIN: [[VAL_ORIGIN:%.*]] = call i32 @__msan_chain_origin(i32 [[ARG_ORIGIN]])
-; CHECK-ORIGIN: call void @__msan_set_origin(i8* [[VAL_PTR_I8]], i64 3, i32 [[VAL_ORIGIN]])
+; CHECK-ORIGIN: call void @__msan_set_origin(ptr [[VAL_PTR]], i64 3, i32 [[VAL_ORIGIN]])
 
-; CHECK: [[VAL:%.*]] = load i24, i24* [[VAL_PTR]]
+; CHECK: [[VAL:%.*]] = load i24, ptr [[VAL_PTR]]
 ; CHECK: ret i24 [[VAL]]
   %val_ptr = alloca i24, align 1
-  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
-  %ptr_i8 = bitcast i24* %ptr to i8*
-  call void @__atomic_load(i64 3, i8* %ptr_i8, i8* %val_ptr_i8, i32 0)
-  %val = load i24, i24* %val_ptr
+  call void @__atomic_load(i64 3, ptr %ptr, ptr %val_ptr, i32 0)
+  %val = load i24, ptr %val_ptr
   ret i24 %val
 }
 
-define void @odd_sized_store(i24* %ptr, i24 %val) sanitize_memory {
-; CHECK: @odd_sized_store(i24* {{.*}}[[PTR:%.+]], i24 {{.*}}[[VAL:%.+]])
+define void @odd_sized_store(ptr %ptr, i24 %val) sanitize_memory {
+; CHECK: @odd_sized_store(ptr {{.*}}[[PTR:%.+]], i24 {{.*}}[[VAL:%.+]])
 ; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
-; CHECK: store i24 [[VAL]], i24* [[VAL_PTR]]
-; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
-; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
+; CHECK: store i24 [[VAL]], ptr [[VAL_PTR]]
 
-; CHECK: ptrtoint i8* [[PTR_I8]]
+; CHECK: ptrtoint ptr [[PTR]]
 ; CHECK: xor
 ; CHECK: [[SPTR_I8:%.*]] = inttoptr
-; CHECK: call void @llvm.memset{{.*}}(i8* align 1 [[SPTR_I8]], i8 0, i64 3
+; CHECK: call void @llvm.memset{{.*}}(ptr align 1 [[SPTR_I8]], i8 0, i64 3
 
-; CHECK: call void @__atomic_store(i64 3, i8* [[VAL_PTR_I8]], i8* [[PTR_I8]], i32 3)
+; CHECK: call void @__atomic_store(i64 3, ptr [[VAL_PTR]], ptr [[PTR]], i32 3)
 ; CHECK: ret void
   %val_ptr = alloca i24, align 1
-  store i24 %val, i24* %val_ptr
-  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
-  %ptr_i8 = bitcast i24* %ptr to i8*
-  call void @__atomic_store(i64 3, i8* %val_ptr_i8, i8* %ptr_i8, i32 0)
+  store i24 %val, ptr %val_ptr
+  call void @__atomic_store(i64 3, ptr %val_ptr, ptr %ptr, i32 0)
   ret void
 }
 

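Two details in the libatomic.ll expectations are easy to miss. First, the
instrumentation strengthens the i32 ordering argument: the tests call
@__atomic_load/@__atomic_store with ordering 0 (relaxed), but the CHECK lines
expect 2 (acquire) for the load and 3 (release) for the store, which keeps the
shadow transfer ordered relative to the data. Second, the store path does not
check the value's shadow; it conservatively unpoisons the destination instead.
A condensed sketch of the store side (%sptr stands in for the computed shadow
address of %ptr):

  call void @llvm.memset.p0.i64(ptr align 1 %sptr, i8 0, i64 3, i1 false) ; unpoison *%ptr
  call void @__atomic_store(i64 3, ptr %val_ptr, ptr %ptr, i32 3)         ; relaxed -> release
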
diff  --git a/llvm/test/Instrumentation/MemorySanitizer/manual-shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/manual-shadow.ll
index cc1127f9d5621..42c3656d6a7d6 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/manual-shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/manual-shadow.ll
@@ -14,9 +14,9 @@
 
 target triple = "x86_64-unknown-linux-gnu"
 
-define i32 @read_value(i32* %a) sanitize_memory {
+define i32 @read_value(ptr %a) sanitize_memory {
 entry:
-  %tmp1 = load i32, i32* %a, align 4
+  %tmp1 = load i32, ptr %a, align 4
   ret i32 %tmp1
 }
 ; CHECK-BASE-LABEL: @read_value

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
index 7d7bb8d078100..24fde53eba63a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
@@ -9,35 +9,35 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-declare void @llvm.masked.store.v4i64.p0v4i64(<4 x i64>, <4 x i64>*, i32, <4 x i1>)
-declare <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
-declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x float*>, i32, <16 x i1>, <16 x float>)
-declare void @llvm.masked.scatter.v8i32.v8p0  (<8 x i32>, <8 x i32*>, i32, <8 x i1>)
-declare <16 x float> @llvm.masked.expandload.v16f32(float*, <16 x i1>, <16 x float>)
-declare void @llvm.masked.compressstore.v16f32(<16 x float>, float*, <16 x i1>)
+declare void @llvm.masked.store.v4i64.p0(<4 x i64>, ptr, i32, <4 x i1>)
+declare <4 x double> @llvm.masked.load.v4f64.p0(ptr, i32, <4 x i1>, <4 x double>)
+declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)
+declare void @llvm.masked.scatter.v8i32.v8p0  (<8 x i32>, <8 x ptr>, i32, <8 x i1>)
+declare <16 x float> @llvm.masked.expandload.v16f32(ptr, <16 x i1>, <16 x float>)
+declare void @llvm.masked.compressstore.v16f32(<16 x float>, ptr, <16 x i1>)
 
-define void @Store(<4 x i64>* %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory {
+define void @Store(ptr %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory {
 ; CHECK-LABEL: @Store(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
-; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to <4 x i64>*
-; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[TMP0]], <4 x i64>* [[TMP3]], i32 1, <4 x i1> [[MASK:%.*]])
-; CHECK-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP0]], ptr [[TMP3]], i32 1, <4 x i1> [[MASK:%.*]])
+; CHECK-NEXT:    tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
 ; CHECK-NEXT:    ret void
 ;
 ; ADDR-LABEL: @Store(
 ; ADDR-NEXT:  entry:
-; ADDR-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
-; ADDR-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; ADDR-NEXT:    [[TMP2:%.*]] = load <4 x i1>, <4 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i1>*), align 8
+; ADDR-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; ADDR-NEXT:    [[TMP2:%.*]] = load <4 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
 ; ADDR-NEXT:    call void @llvm.donothing()
-; ADDR-NEXT:    [[TMP3:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; ADDR-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; ADDR-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
-; ADDR-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to <4 x i64>*
-; ADDR-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[TMP0]], <4 x i64>* [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]])
+; ADDR-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; ADDR-NEXT:    call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP0]], ptr [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]])
 ; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; ADDR-NEXT:    [[TMP6:%.*]] = bitcast <4 x i1> [[TMP2]] to i4
 ; ADDR-NEXT:    [[_MSCMP1:%.*]] = icmp ne i4 [[TMP6]], 0
@@ -47,67 +47,67 @@ define void @Store(<4 x i64>* %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory
 ; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7:[0-9]+]]
 ; ADDR-NEXT:    unreachable
 ; ADDR:       8:
-; ADDR-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; ADDR-NEXT:    tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
 ; ADDR-NEXT:    ret void
 ;
 ; ORIGINS-LABEL: @Store(
 ; ORIGINS-NEXT:  entry:
-; ORIGINS-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
-; ORIGINS-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4
+; ORIGINS-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; ORIGINS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
-; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to <4 x i64>*
+; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
 ; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
 ; ORIGINS-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], -4
-; ORIGINS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i32*
-; ORIGINS-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[TMP0]], <4 x i64>* [[TMP4]], i32 1, <4 x i1> [[MASK:%.*]])
-; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP7]], align 4
-; ORIGINS-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[TMP7]], i32 1
-; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
-; ORIGINS-NEXT:    [[TMP9:%.*]] = getelementptr i32, i32* [[TMP7]], i32 2
-; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP9]], align 4
-; ORIGINS-NEXT:    [[TMP10:%.*]] = getelementptr i32, i32* [[TMP7]], i32 3
-; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP10]], align 4
-; ORIGINS-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[TMP7]], i32 4
-; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP11]], align 4
-; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP7]], i32 5
-; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP12]], align 4
-; ORIGINS-NEXT:    [[TMP13:%.*]] = getelementptr i32, i32* [[TMP7]], i32 6
-; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP13]], align 4
-; ORIGINS-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[TMP7]], i32 7
-; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP14]], align 4
-; ORIGINS-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; ORIGINS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; ORIGINS-NEXT:    call void @llvm.masked.store.v4i64.p0(<4 x i64> [[TMP0]], ptr [[TMP4]], i32 1, <4 x i1> [[MASK:%.*]])
+; ORIGINS-NEXT:    store i32 [[TMP1]], ptr [[TMP7]], align 4
+; ORIGINS-NEXT:    [[TMP8:%.*]] = getelementptr i32, ptr [[TMP7]], i32 1
+; ORIGINS-NEXT:    store i32 [[TMP1]], ptr [[TMP8]], align 4
+; ORIGINS-NEXT:    [[TMP9:%.*]] = getelementptr i32, ptr [[TMP7]], i32 2
+; ORIGINS-NEXT:    store i32 [[TMP1]], ptr [[TMP9]], align 4
+; ORIGINS-NEXT:    [[TMP10:%.*]] = getelementptr i32, ptr [[TMP7]], i32 3
+; ORIGINS-NEXT:    store i32 [[TMP1]], ptr [[TMP10]], align 4
+; ORIGINS-NEXT:    [[TMP11:%.*]] = getelementptr i32, ptr [[TMP7]], i32 4
+; ORIGINS-NEXT:    store i32 [[TMP1]], ptr [[TMP11]], align 4
+; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP7]], i32 5
+; ORIGINS-NEXT:    store i32 [[TMP1]], ptr [[TMP12]], align 4
+; ORIGINS-NEXT:    [[TMP13:%.*]] = getelementptr i32, ptr [[TMP7]], i32 6
+; ORIGINS-NEXT:    store i32 [[TMP1]], ptr [[TMP13]], align 4
+; ORIGINS-NEXT:    [[TMP14:%.*]] = getelementptr i32, ptr [[TMP7]], i32 7
+; ORIGINS-NEXT:    store i32 [[TMP1]], ptr [[TMP14]], align 4
+; ORIGINS-NEXT:    tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
 ; ORIGINS-NEXT:    ret void
 ;
 entry:
-  tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
+  tail call void @llvm.masked.store.v4i64.p0(<4 x i64> %v, ptr %p, i32 1, <4 x i1> %mask)
   ret void
 }
 
-define <4 x double> @Load(<4 x double>* %p, <4 x double> %v, <4 x i1> %mask) sanitize_memory {
+define <4 x double> @Load(ptr %p, <4 x double> %v, <4 x i1> %mask) sanitize_memory {
 ; CHECK-LABEL: @Load(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint <4 x double>* [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
-; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to <4 x i64>*
-; CHECK-NEXT:    [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* [[TMP3]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]])
-; CHECK-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
-; CHECK-NEXT:    store <4 x i64> [[_MSMASKEDLD]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[TMP3]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]])
+; CHECK-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
+; CHECK-NEXT:    store <4 x i64> [[_MSMASKEDLD]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[X]]
 ;
 ; ADDR-LABEL: @Load(
 ; ADDR-NEXT:  entry:
-; ADDR-NEXT:    [[TMP0:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; ADDR-NEXT:    [[TMP1:%.*]] = load <4 x i1>, <4 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i1>*), align 8
-; ADDR-NEXT:    [[TMP2:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
+; ADDR-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; ADDR-NEXT:    [[TMP1:%.*]] = load <4 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; ADDR-NEXT:    [[TMP2:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; ADDR-NEXT:    call void @llvm.donothing()
-; ADDR-NEXT:    [[TMP3:%.*]] = ptrtoint <4 x double>* [[P:%.*]] to i64
+; ADDR-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; ADDR-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
-; ADDR-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to <4 x i64>*
-; ADDR-NEXT:    [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP2]])
+; ADDR-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; ADDR-NEXT:    [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP2]])
 ; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP0]], 0
 ; ADDR-NEXT:    [[TMP6:%.*]] = bitcast <4 x i1> [[TMP1]] to i4
 ; ADDR-NEXT:    [[_MSCMP1:%.*]] = icmp ne i4 [[TMP6]], 0
@@ -117,144 +117,144 @@ define <4 x double> @Load(<4 x double>* %p, <4 x double> %v, <4 x i1> %mask) san
 ; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
 ; ADDR-NEXT:    unreachable
 ; ADDR:       8:
-; ADDR-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
-; ADDR-NEXT:    store <4 x i64> [[_MSMASKEDLD]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ADDR-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
+; ADDR-NEXT:    store <4 x i64> [[_MSMASKEDLD]], ptr @__msan_retval_tls, align 8
 ; ADDR-NEXT:    ret <4 x double> [[X]]
 ;
 ; ORIGINS-LABEL: @Load(
 ; ORIGINS-NEXT:  entry:
-; ORIGINS-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
-; ORIGINS-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4
+; ORIGINS-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; ORIGINS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint <4 x double>* [[P:%.*]] to i64
+; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
-; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to <4 x i64>*
+; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
 ; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
 ; ORIGINS-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], -4
-; ORIGINS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i32*
-; ORIGINS-NEXT:    [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* [[TMP4]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]])
+; ORIGINS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; ORIGINS-NEXT:    [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[TMP4]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]])
 ; ORIGINS-NEXT:    [[TMP8:%.*]] = sub <4 x i1> zeroinitializer, [[MASK]]
 ; ORIGINS-NEXT:    [[TMP9:%.*]] = sext <4 x i1> [[TMP8]] to <4 x i64>
 ; ORIGINS-NEXT:    [[TMP10:%.*]] = and <4 x i64> [[TMP0]], [[TMP9]]
 ; ORIGINS-NEXT:    [[TMP11:%.*]] = bitcast <4 x i64> [[TMP10]] to i256
 ; ORIGINS-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP11]], 0
-; ORIGINS-NEXT:    [[TMP12:%.*]] = load i32, i32* [[TMP7]], align 4
+; ORIGINS-NEXT:    [[TMP12:%.*]] = load i32, ptr [[TMP7]], align 4
 ; ORIGINS-NEXT:    [[TMP13:%.*]] = select i1 [[_MSCMP]], i32 [[TMP1]], i32 [[TMP12]]
-; ORIGINS-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
-; ORIGINS-NEXT:    store <4 x i64> [[_MSMASKEDLD]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGINS-NEXT:    store i32 [[TMP13]], i32* @__msan_retval_origin_tls, align 4
+; ORIGINS-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
+; ORIGINS-NEXT:    store <4 x i64> [[_MSMASKEDLD]], ptr @__msan_retval_tls, align 8
+; ORIGINS-NEXT:    store i32 [[TMP13]], ptr @__msan_retval_origin_tls, align 4
 ; ORIGINS-NEXT:    ret <4 x double> [[X]]
 ;
 entry:
-  %x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
+  %x = call <4 x double> @llvm.masked.load.v4f64.p0(ptr %p, i32 1, <4 x i1> %mask, <4 x double> %v)
   ret <4 x double> %x
 }
 
-define void @StoreNoSanitize(<4 x i64>* %p, <4 x i64> %v, <4 x i1> %mask) {
+define void @StoreNoSanitize(ptr %p, <4 x i64> %v, <4 x i1> %mask) {
 ; CHECK-LABEL: @StoreNoSanitize(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to <4 x i64>*
-; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> zeroinitializer, <4 x i64>* [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
-; CHECK-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p0(<4 x i64> zeroinitializer, ptr [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
+; CHECK-NEXT:    tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
 ; CHECK-NEXT:    ret void
 ;
 ; ADDR-LABEL: @StoreNoSanitize(
 ; ADDR-NEXT:  entry:
 ; ADDR-NEXT:    call void @llvm.donothing()
-; ADDR-NEXT:    [[TMP0:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; ADDR-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; ADDR-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
-; ADDR-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to <4 x i64>*
-; ADDR-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> zeroinitializer, <4 x i64>* [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
-; ADDR-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; ADDR-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
+; ADDR-NEXT:    call void @llvm.masked.store.v4i64.p0(<4 x i64> zeroinitializer, ptr [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
+; ADDR-NEXT:    tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
 ; ADDR-NEXT:    ret void
 ;
 ; ORIGINS-LABEL: @StoreNoSanitize(
 ; ORIGINS-NEXT:  entry:
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[TMP0:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; ORIGINS-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[P:%.*]] to i64
 ; ORIGINS-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
-; ORIGINS-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to <4 x i64>*
+; ORIGINS-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
 ; ORIGINS-NEXT:    [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416
 ; ORIGINS-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], -4
-; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
-; ORIGINS-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> zeroinitializer, <4 x i64>* [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
-; ORIGINS-NEXT:    store i32 0, i32* [[TMP5]], align 4
-; ORIGINS-NEXT:    [[TMP6:%.*]] = getelementptr i32, i32* [[TMP5]], i32 1
-; ORIGINS-NEXT:    store i32 0, i32* [[TMP6]], align 4
-; ORIGINS-NEXT:    [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 2
-; ORIGINS-NEXT:    store i32 0, i32* [[TMP7]], align 4
-; ORIGINS-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[TMP5]], i32 3
-; ORIGINS-NEXT:    store i32 0, i32* [[TMP8]], align 4
-; ORIGINS-NEXT:    [[TMP9:%.*]] = getelementptr i32, i32* [[TMP5]], i32 4
-; ORIGINS-NEXT:    store i32 0, i32* [[TMP9]], align 4
-; ORIGINS-NEXT:    [[TMP10:%.*]] = getelementptr i32, i32* [[TMP5]], i32 5
-; ORIGINS-NEXT:    store i32 0, i32* [[TMP10]], align 4
-; ORIGINS-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[TMP5]], i32 6
-; ORIGINS-NEXT:    store i32 0, i32* [[TMP11]], align 4
-; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP5]], i32 7
-; ORIGINS-NEXT:    store i32 0, i32* [[TMP12]], align 4
-; ORIGINS-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; ORIGINS-NEXT:    call void @llvm.masked.store.v4i64.p0(<4 x i64> zeroinitializer, ptr [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
+; ORIGINS-NEXT:    store i32 0, ptr [[TMP5]], align 4
+; ORIGINS-NEXT:    [[TMP6:%.*]] = getelementptr i32, ptr [[TMP5]], i32 1
+; ORIGINS-NEXT:    store i32 0, ptr [[TMP6]], align 4
+; ORIGINS-NEXT:    [[TMP7:%.*]] = getelementptr i32, ptr [[TMP5]], i32 2
+; ORIGINS-NEXT:    store i32 0, ptr [[TMP7]], align 4
+; ORIGINS-NEXT:    [[TMP8:%.*]] = getelementptr i32, ptr [[TMP5]], i32 3
+; ORIGINS-NEXT:    store i32 0, ptr [[TMP8]], align 4
+; ORIGINS-NEXT:    [[TMP9:%.*]] = getelementptr i32, ptr [[TMP5]], i32 4
+; ORIGINS-NEXT:    store i32 0, ptr [[TMP9]], align 4
+; ORIGINS-NEXT:    [[TMP10:%.*]] = getelementptr i32, ptr [[TMP5]], i32 5
+; ORIGINS-NEXT:    store i32 0, ptr [[TMP10]], align 4
+; ORIGINS-NEXT:    [[TMP11:%.*]] = getelementptr i32, ptr [[TMP5]], i32 6
+; ORIGINS-NEXT:    store i32 0, ptr [[TMP11]], align 4
+; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i32 7
+; ORIGINS-NEXT:    store i32 0, ptr [[TMP12]], align 4
+; ORIGINS-NEXT:    tail call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[P]], i32 1, <4 x i1> [[MASK]])
 ; ORIGINS-NEXT:    ret void
 ;
 entry:
-  tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
+  tail call void @llvm.masked.store.v4i64.p0(<4 x i64> %v, ptr %p, i32 1, <4 x i1> %mask)
   ret void
 }
 
-define <4 x double> @LoadNoSanitize(<4 x double>* %p, <4 x double> %v, <4 x i1> %mask) {
+define <4 x double> @LoadNoSanitize(ptr %p, <4 x double> %v, <4 x i1> %mask) {
 ; CHECK-LABEL: @LoadNoSanitize(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
-; CHECK-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
+; CHECK-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x double> [[X]]
 ;
 ; ADDR-LABEL: @LoadNoSanitize(
 ; ADDR-NEXT:  entry:
 ; ADDR-NEXT:    call void @llvm.donothing()
-; ADDR-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
-; ADDR-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ADDR-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
+; ADDR-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; ADDR-NEXT:    ret <4 x double> [[X]]
 ;
 ; ORIGINS-LABEL: @LoadNoSanitize(
 ; ORIGINS-NEXT:  entry:
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
-; ORIGINS-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGINS-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4
+; ORIGINS-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
+; ORIGINS-NEXT:    store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
+; ORIGINS-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
 ; ORIGINS-NEXT:    ret <4 x double> [[X]]
 ;
 entry:
-  %x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
+  %x = call <4 x double> @llvm.masked.load.v4f64.p0(ptr %p, i32 1, <4 x i1> %mask, <4 x double> %v)
   ret <4 x double> %x
 }
 
 ; FIXME: Provide real implementation.
-define <16 x float> @Gather(<16 x float*> %ptrs, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory {
+define <16 x float> @Gather(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory {
 ; CHECK-LABEL: @Gather(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 136) to <16 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint <16 x float*> [[PTRS:%.*]] to <16 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64>
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor <16 x i64> [[TMP2]], <i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080>
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr <16 x i64> [[TMP3]] to <16 x i32*>
-; CHECK-NEXT:    [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*> [[TMP4]], i32 4, <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
-; CHECK-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
-; CHECK-NEXT:    store <16 x i32> [[_MSMASKEDGATHER]], <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr <16 x i64> [[TMP3]] to <16 x ptr>
+; CHECK-NEXT:    [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> [[TMP4]], i32 4, <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
+; CHECK-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
+; CHECK-NEXT:    store <16 x i32> [[_MSMASKEDGATHER]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x float> [[RET]]
 ;
 ; ADDR-LABEL: @Gather(
-; ADDR-NEXT:    [[TMP1:%.*]] = load <16 x i1>, <16 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <16 x i1>*), align 8
-; ADDR-NEXT:    [[TMP2:%.*]] = load <16 x i64>, <16 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i64>*), align 8
-; ADDR-NEXT:    [[TMP3:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 136) to <16 x i32>*), align 8
+; ADDR-NEXT:    [[TMP1:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
+; ADDR-NEXT:    [[TMP2:%.*]] = load <16 x i64>, ptr @__msan_param_tls, align 8
+; ADDR-NEXT:    [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
 ; ADDR-NEXT:    call void @llvm.donothing()
 ; ADDR-NEXT:    [[_MSMASKEDPTRS:%.*]] = select <16 x i1> [[MASK:%.*]], <16 x i64> [[TMP2]], <16 x i64> zeroinitializer
-; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint <16 x float*> [[PTRS:%.*]] to <16 x i64>
+; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64>
 ; ADDR-NEXT:    [[TMP5:%.*]] = xor <16 x i64> [[TMP4]], <i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080>
-; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr <16 x i64> [[TMP5]] to <16 x i32*>
-; ADDR-NEXT:    [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*> [[TMP6]], i32 4, <16 x i1> [[MASK]], <16 x i32> [[TMP3]])
+; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr <16 x i64> [[TMP5]] to <16 x ptr>
+; ADDR-NEXT:    [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> [[TMP6]], i32 4, <16 x i1> [[MASK]], <16 x i32> [[TMP3]])
 ; ADDR-NEXT:    [[TMP7:%.*]] = bitcast <16 x i1> [[TMP1]] to i16
 ; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i16 [[TMP7]], 0
 ; ADDR-NEXT:    [[TMP8:%.*]] = bitcast <16 x i64> [[_MSMASKEDPTRS]] to i1024
@@ -265,76 +265,76 @@ define <16 x float> @Gather(<16 x float*> %ptrs, <16 x i1> %mask, <16 x float> %
 ; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
 ; ADDR-NEXT:    unreachable
 ; ADDR:       10:
-; ADDR-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
-; ADDR-NEXT:    store <16 x i32> [[_MSMASKEDGATHER]], <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
+; ADDR-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
+; ADDR-NEXT:    store <16 x i32> [[_MSMASKEDGATHER]], ptr @__msan_retval_tls, align 8
 ; ADDR-NEXT:    ret <16 x float> [[RET]]
 ;
 ; ORIGINS-LABEL: @Gather(
-; ORIGINS-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 136) to <16 x i32>*), align 8
-; ORIGINS-NEXT:    [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 136) to i32*), align 4
+; ORIGINS-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; ORIGINS-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 136) to ptr), align 4
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[TMP3:%.*]] = ptrtoint <16 x float*> [[PTRS:%.*]] to <16 x i64>
+; ORIGINS-NEXT:    [[TMP3:%.*]] = ptrtoint <16 x ptr> [[PTRS:%.*]] to <16 x i64>
 ; ORIGINS-NEXT:    [[TMP4:%.*]] = xor <16 x i64> [[TMP3]], <i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080>
-; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr <16 x i64> [[TMP4]] to <16 x i32*>
+; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr <16 x i64> [[TMP4]] to <16 x ptr>
 ; ORIGINS-NEXT:    [[TMP6:%.*]] = add <16 x i64> [[TMP4]], <i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416>
-; ORIGINS-NEXT:    [[TMP7:%.*]] = inttoptr <16 x i64> [[TMP6]] to <16 x i32*>
-; ORIGINS-NEXT:    [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*> [[TMP5]], i32 4, <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
-; ORIGINS-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
-; ORIGINS-NEXT:    store <16 x i32> [[_MSMASKEDGATHER]], <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
-; ORIGINS-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4
+; ORIGINS-NEXT:    [[TMP7:%.*]] = inttoptr <16 x i64> [[TMP6]] to <16 x ptr>
+; ORIGINS-NEXT:    [[_MSMASKEDGATHER:%.*]] = call <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr> [[TMP5]], i32 4, <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
+; ORIGINS-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
+; ORIGINS-NEXT:    store <16 x i32> [[_MSMASKEDGATHER]], ptr @__msan_retval_tls, align 8
+; ORIGINS-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
 ; ORIGINS-NEXT:    ret <16 x float> [[RET]]
 ;
-  %ret = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x float*> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %passthru)
+  %ret = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %passthru)
   ret <16 x float> %ret
 }
 
-define <16 x float> @GatherNoSanitize(<16 x float*> %ptrs, <16 x i1> %mask, <16 x float> %passthru) {
+define <16 x float> @GatherNoSanitize(<16 x ptr> %ptrs, <16 x i1> %mask, <16 x float> %passthru) {
 ; CHECK-LABEL: @GatherNoSanitize(
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
+; CHECK-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x float> [[RET]]
 ;
 ; ADDR-LABEL: @GatherNoSanitize(
 ; ADDR-NEXT:    call void @llvm.donothing()
 ; ADDR-NEXT:    [[_MSMASKEDPTRS:%.*]] = select <16 x i1> [[MASK:%.*]], <16 x i64> zeroinitializer, <16 x i64> zeroinitializer
-; ADDR-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
-; ADDR-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
+; ADDR-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
+; ADDR-NEXT:    store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; ADDR-NEXT:    ret <16 x float> [[RET]]
 ;
 ; ORIGINS-LABEL: @GatherNoSanitize(
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
-; ORIGINS-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
-; ORIGINS-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4
+; ORIGINS-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
+; ORIGINS-NEXT:    store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; ORIGINS-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
 ; ORIGINS-NEXT:    ret <16 x float> [[RET]]
 ;
-  %ret = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x float*> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %passthru)
+  %ret = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %passthru)
   ret <16 x float> %ret
 }
 
 ; FIXME: Provide real implementation.
-define void @Scatter(<8 x i32> %value, <8 x i32*> %ptrs, <8 x i1> %mask) sanitize_memory {
+define void @Scatter(<8 x i32> %value, <8 x ptr> %ptrs, <8 x i1> %mask) sanitize_memory {
 ; CHECK-LABEL: @Scatter(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint <8 x i32*> [[PTRS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor <8 x i64> [[TMP2]], <i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080>
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr <8 x i64> [[TMP3]] to <8 x i32*>
-; CHECK-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[TMP1]], <8 x i32*> [[TMP4]], i32 8, <8 x i1> [[MASK:%.*]])
-; CHECK-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS]], i32 8, <8 x i1> [[MASK]])
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr <8 x i64> [[TMP3]] to <8 x ptr>
+; CHECK-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP1]], <8 x ptr> [[TMP4]], i32 8, <8 x i1> [[MASK:%.*]])
+; CHECK-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
 ; CHECK-NEXT:    ret void
 ;
 ; ADDR-LABEL: @Scatter(
-; ADDR-NEXT:    [[TMP1:%.*]] = load <8 x i1>, <8 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 96) to <8 x i1>*), align 8
-; ADDR-NEXT:    [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i64>*), align 8
-; ADDR-NEXT:    [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; ADDR-NEXT:    [[TMP1:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 96) to ptr), align 8
+; ADDR-NEXT:    [[TMP2:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; ADDR-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
 ; ADDR-NEXT:    call void @llvm.donothing()
 ; ADDR-NEXT:    [[_MSMASKEDPTRS:%.*]] = select <8 x i1> [[MASK:%.*]], <8 x i64> [[TMP2]], <8 x i64> zeroinitializer
-; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint <8 x i32*> [[PTRS:%.*]] to <8 x i64>
+; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
 ; ADDR-NEXT:    [[TMP5:%.*]] = xor <8 x i64> [[TMP4]], <i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080>
-; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr <8 x i64> [[TMP5]] to <8 x i32*>
-; ADDR-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[TMP3]], <8 x i32*> [[TMP6]], i32 8, <8 x i1> [[MASK]])
+; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr <8 x i64> [[TMP5]] to <8 x ptr>
+; ADDR-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP3]], <8 x ptr> [[TMP6]], i32 8, <8 x i1> [[MASK]])
 ; ADDR-NEXT:    [[TMP7:%.*]] = bitcast <8 x i1> [[TMP1]] to i8
 ; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i8 [[TMP7]], 0
 ; ADDR-NEXT:    [[TMP8:%.*]] = bitcast <8 x i64> [[_MSMASKEDPTRS]] to i512
@@ -345,83 +345,83 @@ define void @Scatter(<8 x i32> %value, <8 x i32*> %ptrs, <8 x i1> %mask) sanitiz
 ; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
 ; ADDR-NEXT:    unreachable
 ; ADDR:       10:
-; ADDR-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS]], i32 8, <8 x i1> [[MASK]])
+; ADDR-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
 ; ADDR-NEXT:    ret void
 ;
 ; ORIGINS-LABEL: @Scatter(
-; ORIGINS-NEXT:    [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; ORIGINS-NEXT:    [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; ORIGINS-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; ORIGINS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[TMP3:%.*]] = ptrtoint <8 x i32*> [[PTRS:%.*]] to <8 x i64>
+; ORIGINS-NEXT:    [[TMP3:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
 ; ORIGINS-NEXT:    [[TMP4:%.*]] = xor <8 x i64> [[TMP3]], <i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080>
-; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr <8 x i64> [[TMP4]] to <8 x i32*>
+; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr <8 x i64> [[TMP4]] to <8 x ptr>
 ; ORIGINS-NEXT:    [[TMP6:%.*]] = add <8 x i64> [[TMP4]], <i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416>
-; ORIGINS-NEXT:    [[TMP7:%.*]] = inttoptr <8 x i64> [[TMP6]] to <8 x i32*>
-; ORIGINS-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[TMP1]], <8 x i32*> [[TMP5]], i32 8, <8 x i1> [[MASK:%.*]])
-; ORIGINS-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS]], i32 8, <8 x i1> [[MASK]])
+; ORIGINS-NEXT:    [[TMP7:%.*]] = inttoptr <8 x i64> [[TMP6]] to <8 x ptr>
+; ORIGINS-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP1]], <8 x ptr> [[TMP5]], i32 8, <8 x i1> [[MASK:%.*]])
+; ORIGINS-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
 ; ORIGINS-NEXT:    ret void
 ;
-  call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %value, <8 x i32*> %ptrs, i32 8, <8 x i1> %mask)
+  call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %value, <8 x ptr> %ptrs, i32 8, <8 x i1> %mask)
   ret void
 }
 
-define void @ScatterNoSanitize(<8 x i32> %value, <8 x i32*> %ptrs, <8 x i1> %mask) {
+define void @ScatterNoSanitize(<8 x i32> %value, <8 x ptr> %ptrs, <8 x i1> %mask) {
 ; CHECK-LABEL: @ScatterNoSanitize(
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint <8 x i32*> [[PTRS:%.*]] to <8 x i64>
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor <8 x i64> [[TMP1]], <i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080>
-; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x i32*>
-; CHECK-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> zeroinitializer, <8 x i32*> [[TMP3]], i32 8, <8 x i1> [[MASK:%.*]])
-; CHECK-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS]], i32 8, <8 x i1> [[MASK]])
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x ptr>
+; CHECK-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> zeroinitializer, <8 x ptr> [[TMP3]], i32 8, <8 x i1> [[MASK:%.*]])
+; CHECK-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
 ; CHECK-NEXT:    ret void
 ;
 ; ADDR-LABEL: @ScatterNoSanitize(
 ; ADDR-NEXT:    call void @llvm.donothing()
 ; ADDR-NEXT:    [[_MSMASKEDPTRS:%.*]] = select <8 x i1> [[MASK:%.*]], <8 x i64> zeroinitializer, <8 x i64> zeroinitializer
-; ADDR-NEXT:    [[TMP1:%.*]] = ptrtoint <8 x i32*> [[PTRS:%.*]] to <8 x i64>
+; ADDR-NEXT:    [[TMP1:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
 ; ADDR-NEXT:    [[TMP2:%.*]] = xor <8 x i64> [[TMP1]], <i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080>
-; ADDR-NEXT:    [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x i32*>
-; ADDR-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> zeroinitializer, <8 x i32*> [[TMP3]], i32 8, <8 x i1> [[MASK]])
-; ADDR-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS]], i32 8, <8 x i1> [[MASK]])
+; ADDR-NEXT:    [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x ptr>
+; ADDR-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> zeroinitializer, <8 x ptr> [[TMP3]], i32 8, <8 x i1> [[MASK]])
+; ADDR-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
 ; ADDR-NEXT:    ret void
 ;
 ; ORIGINS-LABEL: @ScatterNoSanitize(
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint <8 x i32*> [[PTRS:%.*]] to <8 x i64>
+; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint <8 x ptr> [[PTRS:%.*]] to <8 x i64>
 ; ORIGINS-NEXT:    [[TMP2:%.*]] = xor <8 x i64> [[TMP1]], <i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080, i64 87960930222080>
-; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x i32*>
+; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr <8 x i64> [[TMP2]] to <8 x ptr>
 ; ORIGINS-NEXT:    [[TMP4:%.*]] = add <8 x i64> [[TMP2]], <i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416, i64 17592186044416>
-; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr <8 x i64> [[TMP4]] to <8 x i32*>
-; ORIGINS-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> zeroinitializer, <8 x i32*> [[TMP3]], i32 8, <8 x i1> [[MASK:%.*]])
-; ORIGINS-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS]], i32 8, <8 x i1> [[MASK]])
+; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr <8 x i64> [[TMP4]] to <8 x ptr>
+; ORIGINS-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> zeroinitializer, <8 x ptr> [[TMP3]], i32 8, <8 x i1> [[MASK:%.*]])
+; ORIGINS-NEXT:    call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[VALUE:%.*]], <8 x ptr> [[PTRS]], i32 8, <8 x i1> [[MASK]])
 ; ORIGINS-NEXT:    ret void
 ;
-  call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %value, <8 x i32*> %ptrs, i32 8, <8 x i1> %mask)
+  call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %value, <8 x ptr> %ptrs, i32 8, <8 x i1> %mask)
   ret void
 }
 
 ; FIXME: Provide real implementation.
-define <16 x float> @ExpandLoad(float* %ptr, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory {
+define <16 x float> @ExpandLoad(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru) sanitize_memory {
 ; CHECK-LABEL: @ExpandLoad(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <16 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to i32*
-; CHECK-NEXT:    [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(i32* [[TMP4]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
-; CHECK-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(float* [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
-; CHECK-NEXT:    store <16 x i32> [[_MSMASKEDEXPLOAD]], <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT:    [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(ptr [[TMP4]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
+; CHECK-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
+; CHECK-NEXT:    store <16 x i32> [[_MSMASKEDEXPLOAD]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x float> [[RET]]
 ;
 ; ADDR-LABEL: @ExpandLoad(
-; ADDR-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; ADDR-NEXT:    [[TMP2:%.*]] = load <16 x i1>, <16 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <16 x i1>*), align 8
-; ADDR-NEXT:    [[TMP3:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <16 x i32>*), align 8
+; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; ADDR-NEXT:    [[TMP2:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; ADDR-NEXT:    [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; ADDR-NEXT:    call void @llvm.donothing()
-; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
 ; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
-; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to i32*
-; ADDR-NEXT:    [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(i32* [[TMP6]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP3]])
+; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; ADDR-NEXT:    [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(ptr [[TMP6]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP3]])
 ; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; ADDR-NEXT:    [[TMP7:%.*]] = bitcast <16 x i1> [[TMP2]] to i16
 ; ADDR-NEXT:    [[_MSCMP1:%.*]] = icmp ne i16 [[TMP7]], 0
@@ -431,75 +431,75 @@ define <16 x float> @ExpandLoad(float* %ptr, <16 x i1> %mask, <16 x float> %pass
 ; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
 ; ADDR-NEXT:    unreachable
 ; ADDR:       9:
-; ADDR-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(float* [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
-; ADDR-NEXT:    store <16 x i32> [[_MSMASKEDEXPLOAD]], <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
+; ADDR-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
+; ADDR-NEXT:    store <16 x i32> [[_MSMASKEDEXPLOAD]], ptr @__msan_retval_tls, align 8
 ; ADDR-NEXT:    ret <16 x float> [[RET]]
 ;
 ; ORIGINS-LABEL: @ExpandLoad(
-; ORIGINS-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <16 x i32>*), align 8
-; ORIGINS-NEXT:    [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 16) to i32*), align 4
+; ORIGINS-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; ORIGINS-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[TMP3:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+; ORIGINS-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
 ; ORIGINS-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
-; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
+; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
 ; ORIGINS-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
 ; ORIGINS-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
-; ORIGINS-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to i32*
-; ORIGINS-NEXT:    [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(i32* [[TMP5]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
-; ORIGINS-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(float* [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
-; ORIGINS-NEXT:    store <16 x i32> [[_MSMASKEDEXPLOAD]], <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
-; ORIGINS-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4
+; ORIGINS-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; ORIGINS-NEXT:    [[_MSMASKEDEXPLOAD:%.*]] = call <16 x i32> @llvm.masked.expandload.v16i32(ptr [[TMP5]], <16 x i1> [[MASK:%.*]], <16 x i32> [[TMP1]])
+; ORIGINS-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR]], <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
+; ORIGINS-NEXT:    store <16 x i32> [[_MSMASKEDEXPLOAD]], ptr @__msan_retval_tls, align 8
+; ORIGINS-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
 ; ORIGINS-NEXT:    ret <16 x float> [[RET]]
 ;
-  %ret = call <16 x float> @llvm.masked.expandload.v16f32(float* %ptr, <16 x i1> %mask, <16 x float> %passthru)
+  %ret = call <16 x float> @llvm.masked.expandload.v16f32(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru)
   ret <16 x float> %ret
 }
 
-define <16 x float> @ExpandLoadNoSanitize(float* %ptr, <16 x i1> %mask, <16 x float> %passthru) {
+define <16 x float> @ExpandLoadNoSanitize(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru) {
 ; CHECK-LABEL: @ExpandLoadNoSanitize(
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(float* [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
-; CHECK-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
+; CHECK-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x float> [[RET]]
 ;
 ; ADDR-LABEL: @ExpandLoadNoSanitize(
 ; ADDR-NEXT:    call void @llvm.donothing()
-; ADDR-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(float* [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
-; ADDR-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
+; ADDR-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
+; ADDR-NEXT:    store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; ADDR-NEXT:    ret <16 x float> [[RET]]
 ;
 ; ORIGINS-LABEL: @ExpandLoadNoSanitize(
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(float* [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
-; ORIGINS-NEXT:    store <16 x i32> zeroinitializer, <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
-; ORIGINS-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4
+; ORIGINS-NEXT:    [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(ptr [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
+; ORIGINS-NEXT:    store <16 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; ORIGINS-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
 ; ORIGINS-NEXT:    ret <16 x float> [[RET]]
 ;
-  %ret = call <16 x float> @llvm.masked.expandload.v16f32(float* %ptr, <16 x i1> %mask, <16 x float> %passthru)
+  %ret = call <16 x float> @llvm.masked.expandload.v16f32(ptr %ptr, <16 x i1> %mask, <16 x float> %passthru)
   ret <16 x float> %ret
 }
 
 ; FIXME: Provide real implementation.
-define void @CompressStore(<16 x float> %value, float* %ptr, <16 x i1> %mask) sanitize_memory {
+define void @CompressStore(<16 x float> %value, ptr %ptr, <16 x i1> %mask) sanitize_memory {
 ; CHECK-LABEL: @CompressStore(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to i32*
-; CHECK-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP1]], i32* [[TMP4]], <16 x i1> [[MASK:%.*]])
-; CHECK-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], float* [[PTR]], <16 x i1> [[MASK]])
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP1]], ptr [[TMP4]], <16 x i1> [[MASK:%.*]])
+; CHECK-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
 ; CHECK-NEXT:    ret void
 ;
 ; ADDR-LABEL: @CompressStore(
-; ADDR-NEXT:    [[TMP1:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 64) to i64*), align 8
-; ADDR-NEXT:    [[TMP2:%.*]] = load <16 x i1>, <16 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 72) to <16 x i1>*), align 8
-; ADDR-NEXT:    [[TMP3:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i32>*), align 8
+; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; ADDR-NEXT:    [[TMP2:%.*]] = load <16 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; ADDR-NEXT:    [[TMP3:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
 ; ADDR-NEXT:    call void @llvm.donothing()
-; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
 ; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
-; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to i32*
-; ADDR-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP3]], i32* [[TMP6]], <16 x i1> [[MASK:%.*]])
+; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; ADDR-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP3]], ptr [[TMP6]], <16 x i1> [[MASK:%.*]])
 ; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; ADDR-NEXT:    [[TMP7:%.*]] = bitcast <16 x i1> [[TMP2]] to i16
 ; ADDR-NEXT:    [[_MSCMP1:%.*]] = icmp ne i16 [[TMP7]], 0
@@ -509,58 +509,58 @@ define void @CompressStore(<16 x float> %value, float* %ptr, <16 x i1> %mask) sa
 ; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR7]]
 ; ADDR-NEXT:    unreachable
 ; ADDR:       9:
-; ADDR-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], float* [[PTR]], <16 x i1> [[MASK]])
+; ADDR-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
 ; ADDR-NEXT:    ret void
 ;
 ; ORIGINS-LABEL: @CompressStore(
-; ORIGINS-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i32>*), align 8
-; ORIGINS-NEXT:    [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; ORIGINS-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
+; ORIGINS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[TMP3:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+; ORIGINS-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
 ; ORIGINS-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
-; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
+; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
 ; ORIGINS-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
 ; ORIGINS-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], -4
-; ORIGINS-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to i32*
-; ORIGINS-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP1]], i32* [[TMP5]], <16 x i1> [[MASK:%.*]])
-; ORIGINS-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], float* [[PTR]], <16 x i1> [[MASK]])
+; ORIGINS-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; ORIGINS-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> [[TMP1]], ptr [[TMP5]], <16 x i1> [[MASK:%.*]])
+; ORIGINS-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
 ; ORIGINS-NEXT:    ret void
 ;
-  call void @llvm.masked.compressstore.v16f32(<16 x float> %value, float* %ptr, <16 x i1> %mask)
+  call void @llvm.masked.compressstore.v16f32(<16 x float> %value, ptr %ptr, <16 x i1> %mask)
   ret void
 }
 
-define void @CompressStoreNoSanitize(<16 x float> %value, float* %ptr, <16 x i1> %mask) {
+define void @CompressStoreNoSanitize(<16 x float> %value, ptr %ptr, <16 x i1> %mask) {
 ; CHECK-LABEL: @CompressStoreNoSanitize(
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
-; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to i32*
-; CHECK-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, i32* [[TMP3]], <16 x i1> [[MASK:%.*]])
-; CHECK-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], float* [[PTR]], <16 x i1> [[MASK]])
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, ptr [[TMP3]], <16 x i1> [[MASK:%.*]])
+; CHECK-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
 ; CHECK-NEXT:    ret void
 ;
 ; ADDR-LABEL: @CompressStoreNoSanitize(
 ; ADDR-NEXT:    call void @llvm.donothing()
-; ADDR-NEXT:    [[TMP1:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+; ADDR-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
 ; ADDR-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
-; ADDR-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to i32*
-; ADDR-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, i32* [[TMP3]], <16 x i1> [[MASK:%.*]])
-; ADDR-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], float* [[PTR]], <16 x i1> [[MASK]])
+; ADDR-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; ADDR-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, ptr [[TMP3]], <16 x i1> [[MASK:%.*]])
+; ADDR-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
 ; ADDR-NEXT:    ret void
 ;
 ; ORIGINS-LABEL: @CompressStoreNoSanitize(
 ; ORIGINS-NEXT:    call void @llvm.donothing()
-; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint float* [[PTR:%.*]] to i64
+; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[PTR:%.*]] to i64
 ; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
-; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to i32*
+; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
 ; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
 ; ORIGINS-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], -4
-; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to i32*
-; ORIGINS-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, i32* [[TMP3]], <16 x i1> [[MASK:%.*]])
-; ORIGINS-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], float* [[PTR]], <16 x i1> [[MASK]])
+; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; ORIGINS-NEXT:    call void @llvm.masked.compressstore.v16i32(<16 x i32> zeroinitializer, ptr [[TMP3]], <16 x i1> [[MASK:%.*]])
+; ORIGINS-NEXT:    call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], ptr [[PTR]], <16 x i1> [[MASK]])
 ; ORIGINS-NEXT:    ret void
 ;
-  call void @llvm.masked.compressstore.v16f32(<16 x float> %value, float* %ptr, <16 x i1> %mask)
+  call void @llvm.masked.compressstore.v16f32(<16 x float> %value, ptr %ptr, <16 x i1> %mask)
   ret void
 }

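The changes in this file follow one mechanical pattern: masked-intrinsic overloads lose the pointee type from their mangled suffix, and constant-expression bitcasts of the MSan TLS arrays collapse into direct uses of the globals. A minimal side-by-side sketch, lifted from the hunks above rather than an additional change:

  ; Typed pointers (old form):
  tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
  store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8

  ; Opaque pointers (new form): the suffix mangles only the address space,
  ; and the TLS global is addressed directly.
  tail call void @llvm.masked.store.v4i64.p0(<4 x i64> %v, ptr %p, i32 1, <4 x i1> %mask)
  store <4 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8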
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
index e9915e80eb590..1239698f3ac32 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_asm_conservative.ll
@@ -20,10 +20,10 @@ target triple = "x86_64-unknown-linux-gnu"
 @pair1 = common dso_local global %struct.pair zeroinitializer, align 4
 @c2 = common dso_local global i8 0, align 1
 @c1 = common dso_local global i8 0, align 1
-@memcpy_d1 = common dso_local global i8* (i8*, i8*, i32)* null, align 8
-@memcpy_d2 = common dso_local global i8* (i8*, i8*, i32)* null, align 8
-@memcpy_s1 = common dso_local global i8* (i8*, i8*, i32)* null, align 8
-@memcpy_s2 = common dso_local global i8* (i8*, i8*, i32)* null, align 8
+@memcpy_d1 = common dso_local global ptr null, align 8
+@memcpy_d2 = common dso_local global ptr null, align 8
+@memcpy_s1 = common dso_local global ptr null, align 8
+@memcpy_s2 = common dso_local global ptr null, align 8
 
 ; The functions below were generated from a C source that contains declarations like follows:
 ;   void f1() {
@@ -36,190 +36,180 @@ target triple = "x86_64-unknown-linux-gnu"
 ;   asm("" : "=r" (id1) : "r" (is1));
 define dso_local void @f_1i_1o_reg() sanitize_memory {
 entry:
-  %0 = load i32, i32* @is1, align 4
+  %0 = load i32, ptr @is1, align 4
   %1 = call i32 asm "", "=r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0)
-  store i32 %1, i32* @id1, align 4
+  store i32 %1, ptr @id1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @f_1i_1o_reg
-; CHECK: [[IS1_F1:%.*]] = load i32, i32* @is1, align 4
+; CHECK: [[IS1_F1:%.*]] = load i32, ptr @is1, align 4
 ; CHECK: call void @__msan_warning
 ; CHECK: call i32 asm "",{{.*}}(i32 [[IS1_F1]])
 ; CHECK: [[PACK1_F1:%.*]] = call {{.*}} @__msan_metadata_ptr_for_store_4({{.*}}@id1{{.*}})
-; CHECK: [[EXT1_F1:%.*]] = extractvalue { i8*, i32* } [[PACK1_F1]], 0
-; CHECK: [[CAST1_F1:%.*]] = bitcast i8* [[EXT1_F1]] to i32*
-; CHECK: store i32 0, i32* [[CAST1_F1]]
+; CHECK: [[EXT1_F1:%.*]] = extractvalue { ptr, ptr } [[PACK1_F1]], 0
+; CHECK: store i32 0, ptr [[EXT1_F1]]
 
 
 ; Two input registers, two output registers:
 ;   asm("" : "=r" (id1), "=r" (id2) : "r" (is1), "r"(is2));
 define dso_local void @f_2i_2o_reg() sanitize_memory {
 entry:
-  %0 = load i32, i32* @is1, align 4
-  %1 = load i32, i32* @is2, align 4
+  %0 = load i32, ptr @is1, align 4
+  %1 = load i32, ptr @is2, align 4
   %2 = call { i32, i32 } asm "", "=r,=r,r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1)
   %asmresult = extractvalue { i32, i32 } %2, 0
   %asmresult1 = extractvalue { i32, i32 } %2, 1
-  store i32 %asmresult, i32* @id1, align 4
-  store i32 %asmresult1, i32* @id2, align 4
+  store i32 %asmresult, ptr @id1, align 4
+  store i32 %asmresult1, ptr @id2, align 4
   ret void
 }
 
 ; CHECK-LABEL: @f_2i_2o_reg
-; CHECK: [[IS1_F2:%.*]] = load i32, i32* @is1, align 4
-; CHECK: [[IS2_F2:%.*]] = load i32, i32* @is2, align 4
+; CHECK: [[IS1_F2:%.*]] = load i32, ptr @is1, align 4
+; CHECK: [[IS2_F2:%.*]] = load i32, ptr @is2, align 4
 ; CHECK: call void @__msan_warning
 ; CHECK: call void @__msan_warning
 ; CHECK: call { i32, i32 } asm "",{{.*}}(i32 [[IS1_F2]], i32 [[IS2_F2]])
 ; CHECK: [[PACK1_F2:%.*]] = call {{.*}} @__msan_metadata_ptr_for_store_4({{.*}}@id1{{.*}})
-; CHECK: [[EXT1_F2:%.*]] = extractvalue { i8*, i32* } [[PACK1_F2]], 0
-; CHECK: [[CAST1_F2:%.*]] = bitcast i8* [[EXT1_F2]] to i32*
-; CHECK: store i32 0, i32* [[CAST1_F2]]
+; CHECK: [[EXT1_F2:%.*]] = extractvalue { ptr, ptr } [[PACK1_F2]], 0
+; CHECK: store i32 0, ptr [[EXT1_F2]]
 ; CHECK: [[PACK2_F2:%.*]] = call {{.*}} @__msan_metadata_ptr_for_store_4({{.*}}@id2{{.*}})
-; CHECK: [[EXT2_F2:%.*]] = extractvalue { i8*, i32* } [[PACK2_F2]], 0
-; CHECK: [[CAST2_F2:%.*]] = bitcast i8* [[EXT2_F2]] to i32*
-; CHECK: store i32 0, i32* [[CAST2_F2]]
+; CHECK: [[EXT2_F2:%.*]] = extractvalue { ptr, ptr } [[PACK2_F2]], 0
+; CHECK: store i32 0, ptr [[EXT2_F2]]
 
 ; Input same as output, used twice:
 ;   asm("" : "=r" (id1), "=r" (id2) : "r" (id1), "r" (id2));
 define dso_local void @f_2i_2o_reuse2_reg() sanitize_memory {
 entry:
-  %0 = load i32, i32* @id1, align 4
-  %1 = load i32, i32* @id2, align 4
+  %0 = load i32, ptr @id1, align 4
+  %1 = load i32, ptr @id2, align 4
   %2 = call { i32, i32 } asm "", "=r,=r,r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1)
   %asmresult = extractvalue { i32, i32 } %2, 0
   %asmresult1 = extractvalue { i32, i32 } %2, 1
-  store i32 %asmresult, i32* @id1, align 4
-  store i32 %asmresult1, i32* @id2, align 4
+  store i32 %asmresult, ptr @id1, align 4
+  store i32 %asmresult1, ptr @id2, align 4
   ret void
 }
 
 ; CHECK-LABEL: @f_2i_2o_reuse2_reg
-; CHECK: [[ID1_F3:%.*]] = load i32, i32* @id1, align 4
-; CHECK: [[ID2_F3:%.*]] = load i32, i32* @id2, align 4
+; CHECK: [[ID1_F3:%.*]] = load i32, ptr @id1, align 4
+; CHECK: [[ID2_F3:%.*]] = load i32, ptr @id2, align 4
 ; CHECK: call void @__msan_warning
 ; CHECK: call void @__msan_warning
 ; CHECK: call { i32, i32 } asm "",{{.*}}(i32 [[ID1_F3]], i32 [[ID2_F3]])
 ; CHECK: [[PACK1_F3:%.*]] = call {{.*}} @__msan_metadata_ptr_for_store_4({{.*}}@id1{{.*}})
-; CHECK: [[EXT1_F3:%.*]] = extractvalue { i8*, i32* } [[PACK1_F3]], 0
-; CHECK: [[CAST1_F3:%.*]] = bitcast i8* [[EXT1_F3]] to i32*
-; CHECK: store i32 0, i32* [[CAST1_F3]]
+; CHECK: [[EXT1_F3:%.*]] = extractvalue { ptr, ptr } [[PACK1_F3]], 0
+; CHECK: store i32 0, ptr [[EXT1_F3]]
 ; CHECK: [[PACK2_F3:%.*]] = call {{.*}} @__msan_metadata_ptr_for_store_4({{.*}}@id2{{.*}})
-; CHECK: [[EXT2_F3:%.*]] = extractvalue { i8*, i32* } [[PACK2_F3]], 0
-; CHECK: [[CAST2_F3:%.*]] = bitcast i8* [[EXT2_F3]] to i32*
-; CHECK: store i32 0, i32* [[CAST2_F3]]
+; CHECK: [[EXT2_F3:%.*]] = extractvalue { ptr, ptr } [[PACK2_F3]], 0
+; CHECK: store i32 0, ptr [[EXT2_F3]]
 
 
 ; One of the input registers is also an output:
 ;   asm("" : "=r" (id1), "=r" (id2) : "r" (id1), "r"(is1));
 define dso_local void @f_2i_2o_reuse1_reg() sanitize_memory {
 entry:
-  %0 = load i32, i32* @id1, align 4
-  %1 = load i32, i32* @is1, align 4
+  %0 = load i32, ptr @id1, align 4
+  %1 = load i32, ptr @is1, align 4
   %2 = call { i32, i32 } asm "", "=r,=r,r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1)
   %asmresult = extractvalue { i32, i32 } %2, 0
   %asmresult1 = extractvalue { i32, i32 } %2, 1
-  store i32 %asmresult, i32* @id1, align 4
-  store i32 %asmresult1, i32* @id2, align 4
+  store i32 %asmresult, ptr @id1, align 4
+  store i32 %asmresult1, ptr @id2, align 4
   ret void
 }
 
 ; CHECK-LABEL: @f_2i_2o_reuse1_reg
-; CHECK: [[ID1_F4:%.*]] = load i32, i32* @id1, align 4
-; CHECK: [[IS1_F4:%.*]] = load i32, i32* @is1, align 4
+; CHECK: [[ID1_F4:%.*]] = load i32, ptr @id1, align 4
+; CHECK: [[IS1_F4:%.*]] = load i32, ptr @is1, align 4
 ; CHECK: call void @__msan_warning
 ; CHECK: call void @__msan_warning
 ; CHECK: call { i32, i32 } asm "",{{.*}}(i32 [[ID1_F4]], i32 [[IS1_F4]])
 ; CHECK: [[PACK1_F4:%.*]] = call {{.*}} @__msan_metadata_ptr_for_store_4({{.*}}@id1{{.*}})
-; CHECK: [[EXT1_F4:%.*]] = extractvalue { i8*, i32* } [[PACK1_F4]], 0
-; CHECK: [[CAST1_F4:%.*]] = bitcast i8* [[EXT1_F4]] to i32*
-; CHECK: store i32 0, i32* [[CAST1_F4]]
+; CHECK: [[EXT1_F4:%.*]] = extractvalue { ptr, ptr } [[PACK1_F4]], 0
+; CHECK: store i32 0, ptr [[EXT1_F4]]
 ; CHECK: [[PACK2_F4:%.*]] = call {{.*}} @__msan_metadata_ptr_for_store_4({{.*}}@id2{{.*}})
-; CHECK: [[EXT2_F4:%.*]] = extractvalue { i8*, i32* } [[PACK2_F4]], 0
-; CHECK: [[CAST2_F4:%.*]] = bitcast i8* [[EXT2_F4]] to i32*
-; CHECK: store i32 0, i32* [[CAST2_F4]]
+; CHECK: [[EXT2_F4:%.*]] = extractvalue { ptr, ptr } [[PACK2_F4]], 0
+; CHECK: store i32 0, ptr [[EXT2_F4]]
 
 
 ; One input register, three output registers:
 ;   asm("" : "=r" (id1), "=r" (id2), "=r" (id3) : "r" (is1));
 define dso_local void @f_1i_3o_reg() sanitize_memory {
 entry:
-  %0 = load i32, i32* @is1, align 4
+  %0 = load i32, ptr @is1, align 4
   %1 = call { i32, i32, i32 } asm "", "=r,=r,=r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0)
   %asmresult = extractvalue { i32, i32, i32 } %1, 0
   %asmresult1 = extractvalue { i32, i32, i32 } %1, 1
   %asmresult2 = extractvalue { i32, i32, i32 } %1, 2
-  store i32 %asmresult, i32* @id1, align 4
-  store i32 %asmresult1, i32* @id2, align 4
-  store i32 %asmresult2, i32* @id3, align 4
+  store i32 %asmresult, ptr @id1, align 4
+  store i32 %asmresult1, ptr @id2, align 4
+  store i32 %asmresult2, ptr @id3, align 4
   ret void
 }
 
 ; CHECK-LABEL: @f_1i_3o_reg
-; CHECK: [[IS1_F5:%.*]] = load i32, i32* @is1, align 4
+; CHECK: [[IS1_F5:%.*]] = load i32, ptr @is1, align 4
 ; CHECK: call void @__msan_warning
 ; CHECK: call { i32, i32, i32 } asm "",{{.*}}(i32 [[IS1_F5]])
 ; CHECK: [[PACK1_F5:%.*]] = call {{.*}} @__msan_metadata_ptr_for_store_4({{.*}}@id1{{.*}})
-; CHECK: [[EXT1_F5:%.*]] = extractvalue { i8*, i32* } [[PACK1_F5]], 0
-; CHECK: [[CAST1_F5:%.*]] = bitcast i8* [[EXT1_F5]] to i32*
-; CHECK: store i32 0, i32* [[CAST1_F5]]
+; CHECK: [[EXT1_F5:%.*]] = extractvalue { ptr, ptr } [[PACK1_F5]], 0
+; CHECK: store i32 0, ptr [[EXT1_F5]]
 ; CHECK: [[PACK2_F5:%.*]] = call {{.*}} @__msan_metadata_ptr_for_store_4({{.*}}@id2{{.*}})
-; CHECK: [[EXT2_F5:%.*]] = extractvalue { i8*, i32* } [[PACK2_F5]], 0
-; CHECK: [[CAST2_F5:%.*]] = bitcast i8* [[EXT2_F5]] to i32*
-; CHECK: store i32 0, i32* [[CAST2_F5]]
+; CHECK: [[EXT2_F5:%.*]] = extractvalue { ptr, ptr } [[PACK2_F5]], 0
+; CHECK: store i32 0, ptr [[EXT2_F5]]
 ; CHECK: [[PACK3_F5:%.*]] = call {{.*}} @__msan_metadata_ptr_for_store_4({{.*}}@id3{{.*}})
-; CHECK: [[EXT3_F5:%.*]] = extractvalue { i8*, i32* } [[PACK3_F5]], 0
-; CHECK: [[CAST3_F5:%.*]] = bitcast i8* [[EXT3_F5]] to i32*
-; CHECK: store i32 0, i32* [[CAST3_F5]]
+; CHECK: [[EXT3_F5:%.*]] = extractvalue { ptr, ptr } [[PACK3_F5]], 0
+; CHECK: store i32 0, ptr [[EXT3_F5]]
 
 
 ; 2 input memory args, 2 output memory args:
 ;  asm("" : "=m" (id1), "=m" (id2) : "m" (is1), "m"(is2))
 define dso_local void @f_2i_2o_mem() sanitize_memory {
 entry:
-  call void asm "", "=*m,=*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id1, i32* elementtype(i32) @id2, i32* elementtype(i32) @is1, i32* elementtype(i32) @is2)
+  call void asm "", "=*m,=*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) @id1, ptr elementtype(i32) @id2, ptr elementtype(i32) @is1, ptr elementtype(i32) @is2)
   ret void
 }
 
 ; CHECK-LABEL: @f_2i_2o_mem
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id1{{.*}}, i64 4)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id2{{.*}}, i64 4)
-; CHECK: call void asm "", "=*m,=*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id1, i32* elementtype(i32) @id2, i32* elementtype(i32) @is1, i32* elementtype(i32) @is2)
+; CHECK: call void asm "", "=*m,=*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) @id1, ptr elementtype(i32) @id2, ptr elementtype(i32) @is1, ptr elementtype(i32) @is2)
 
 
 ; Same input and output passed as both memory and register:
 ;  asm("" : "=r" (id1), "=m"(id1) : "r"(is1), "m"(is1));
 define dso_local void @f_1i_1o_memreg() sanitize_memory {
 entry:
-  %0 = load i32, i32* @is1, align 4
-  %1 = call i32 asm "", "=r,=*m,r,*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id1, i32 %0, i32* elementtype(i32) @is1)
-  store i32 %1, i32* @id1, align 4
+  %0 = load i32, ptr @is1, align 4
+  %1 = call i32 asm "", "=r,=*m,r,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) @id1, i32 %0, ptr elementtype(i32) @is1)
+  store i32 %1, ptr @id1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @f_1i_1o_memreg
-; CHECK: [[IS1_F7:%.*]] = load i32, i32* @is1, align 4
+; CHECK: [[IS1_F7:%.*]] = load i32, ptr @is1, align 4
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id1{{.*}}, i64 4)
 ; CHECK: call void @__msan_warning
-; CHECK: call i32 asm "", "=r,=*m,r,*m,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id1, i32 [[IS1_F7]], i32* elementtype(i32) @is1)
+; CHECK: call i32 asm "", "=r,=*m,r,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) @id1, i32 [[IS1_F7]], ptr elementtype(i32) @is1)
 
 
 ; Three outputs, first and last returned via regs, second via mem:
 ;  asm("" : "=r" (id1), "=m"(id2), "=r" (id3):);
 define dso_local void @f_3o_reg_mem_reg() sanitize_memory {
 entry:
-  %0 = call { i32, i32 } asm "", "=r,=*m,=r,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id2)
+  %0 = call { i32, i32 } asm "", "=r,=*m,=r,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) @id2)
   %asmresult = extractvalue { i32, i32 } %0, 0
   %asmresult1 = extractvalue { i32, i32 } %0, 1
-  store i32 %asmresult, i32* @id1, align 4
-  store i32 %asmresult1, i32* @id3, align 4
+  store i32 %asmresult, ptr @id1, align 4
+  store i32 %asmresult1, ptr @id3, align 4
   ret void
 }
 
 ; CHECK-LABEL: @f_3o_reg_mem_reg
-; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@id2{{.*}}), i64 4)
-; CHECK: call { i32, i32 } asm "", "=r,=*m,=r,~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) @id2)
+; CHECK-CONS: call void @__msan_instrument_asm_store(ptr @id2, i64 4)
+; CHECK: call { i32, i32 } asm "", "=r,=*m,=r,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) @id2)
 
 
 ; Three inputs and three outputs of different types: a pair, a char, a function pointer.
@@ -227,14 +217,14 @@ entry:
 ;  asm("" : "=r" (pair2), "=r" (c2), "=r" (memcpy_d1) : "r"(pair1), "r"(c1), "r"(memcpy_s1));
 define dso_local void @f_3i_3o_complex_reg() sanitize_memory {
 entry:
-  %0 = load i64, i64* bitcast (%struct.pair* @pair1 to i64*), align 4
-  %1 = load i8, i8* @c1, align 1
-  %2 = load i8* (i8*, i8*, i32)*, i8* (i8*, i8*, i32)** @memcpy_s1, align 8
-  %3 = call { i8, i8* (i8*, i8*, i32)* } asm "", "=*r,=r,=r,r,r,r,~{dirflag},~{fpsr},~{flags}"(%struct.pair* elementtype(%struct.pair) @pair2, i64 %0, i8 %1, i8* (i8*, i8*, i32)* %2)
-  %asmresult = extractvalue { i8, i8* (i8*, i8*, i32)* } %3, 0
-  %asmresult1 = extractvalue { i8, i8* (i8*, i8*, i32)* } %3, 1
-  store i8 %asmresult, i8* @c2, align 1
-  store i8* (i8*, i8*, i32)* %asmresult1, i8* (i8*, i8*, i32)** @memcpy_d1, align 8
+  %0 = load i64, ptr @pair1, align 4
+  %1 = load i8, ptr @c1, align 1
+  %2 = load ptr, ptr @memcpy_s1, align 8
+  %3 = call { i8, ptr } asm "", "=*r,=r,=r,r,r,r,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(%struct.pair) @pair2, i64 %0, i8 %1, ptr %2)
+  %asmresult = extractvalue { i8, ptr } %3, 0
+  %asmresult1 = extractvalue { i8, ptr } %3, 1
+  store i8 %asmresult, ptr @c2, align 1
+  store ptr %asmresult1, ptr @memcpy_d1, align 8
   ret void
 }
 
@@ -246,14 +236,14 @@ entry:
 ; CHECK: call void @__msan_warning
 ; CHECK: call void @__msan_warning
 ; CHECK: call void @__msan_warning
-; CHECK: call { i8, i8* (i8*, i8*, i32)* } asm "", "=*r,=r,=r,r,r,r,~{dirflag},~{fpsr},~{flags}"(%struct.pair* elementtype(%struct.pair) @pair2, {{.*}}[[PAIR1_F9]], i8 [[C1_F9]], {{.*}} [[MEMCPY_S1_F9]])
+; CHECK: call { i8, ptr } asm "", "=*r,=r,=r,r,r,r,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(%struct.pair) @pair2, {{.*}}[[PAIR1_F9]], i8 [[C1_F9]], {{.*}} [[MEMCPY_S1_F9]])
 
 ; Three inputs and three outputs of different types: a pair, a char, a function pointer.
 ; Everything is passed in memory:
 ;  asm("" : "=m" (pair2), "=m" (c2), "=m" (memcpy_d1) : "m"(pair1), "m"(c1), "m"(memcpy_s1));
 define dso_local void @f_3i_3o_complex_mem() sanitize_memory {
 entry:
-  call void asm "", "=*m,=*m,=*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(%struct.pair* elementtype(%struct.pair) @pair2, i8* elementtype(i8) @c2, i8* (i8*, i8*, i32)** elementtype(i8* (i8*, i8*, i32)*) @memcpy_d1, %struct.pair* elementtype(%struct.pair) @pair1, i8* elementtype(i8) @c1, i8* (i8*, i8*, i32)** elementtype(i8* (i8*, i8*, i32)*) @memcpy_s1)
+  call void asm "", "=*m,=*m,=*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(%struct.pair) @pair2, ptr elementtype(i8) @c2, ptr elementtype(ptr) @memcpy_d1, ptr elementtype(%struct.pair) @pair1, ptr elementtype(i8) @c1, ptr elementtype(ptr) @memcpy_s1)
   ret void
 }
 
@@ -261,7 +251,7 @@ entry:
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@pair2{{.*}}, i64 8)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@c2{{.*}}, i64 1)
 ; CHECK-CONS: call void @__msan_instrument_asm_store({{.*}}@memcpy_d1{{.*}}, i64 8)
-; CHECK: call void asm "", "=*m,=*m,=*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(%struct.pair* elementtype(%struct.pair) @pair2, i8* elementtype(i8) @c2, i8* (i8*, i8*, i32)** elementtype(i8* (i8*, i8*, i32)*) @memcpy_d1, %struct.pair* elementtype(%struct.pair) @pair1, i8* elementtype(i8) @c1, i8* (i8*, i8*, i32)** elementtype(i8* (i8*, i8*, i32)*) @memcpy_s1)
+; CHECK: call void asm "", "=*m,=*m,=*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(%struct.pair) @pair2, ptr elementtype(i8) @c2, ptr elementtype(ptr) @memcpy_d1, ptr elementtype(%struct.pair) @pair1, ptr elementtype(i8) @c1, ptr elementtype(ptr) @memcpy_s1)
 
 
 ; A simple asm goto construct to check that callbr is handled correctly:

diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
index 06712b4f0eaae..29a70fd073647 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -7,8 +7,8 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @llvm.used = appending global [1 x i8*] [i8* bitcast (void ()* @msan.module_ctor to i8*)]
-; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @msan.module_ctor, i8* null }
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @msan.module_ctor]
+; CHECK: @llvm.global_ctors {{.*}} { i32 0, ptr @msan.module_ctor, ptr null }
 
 ; Check the presence and the linkage type of __msan_track_origins and
 ; other interface symbols.
@@ -25,9 +25,9 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; Check instrumentation of stores
 
-define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
+define void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
 entry:
-  store i32 %x, i32* %p, align 4
+  store i32 %x, ptr %p, align 4
   ret void
 }
 
@@ -49,9 +49,9 @@ entry:
 ; Shadow store has the same alignment as the original store; origin store
 ; does not specify explicit alignment.
 
-define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
+define void @AlignedStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
 entry:
-  store i32 %x, i32* %p, align 32
+  store i32 %x, ptr %p, align 32
   ret void
 }
 
@@ -70,9 +70,9 @@ entry:
 
 
 ; load followed by cmp: check that we load the shadow and call __msan_warning_with_origin.
-define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
+define void @LoadAndCmp(ptr nocapture %a) nounwind uwtable sanitize_memory {
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %tobool = icmp eq i32 %0, 0
   br i1 %tobool, label %if.end, label %if.then
 
@@ -108,10 +108,10 @@ entry:
 ; CHECK: ret i32
 
 ; Check that we get the shadow for the retval.
-define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
+define void @CopyRetVal(ptr nocapture %a) nounwind uwtable sanitize_memory {
 entry:
   %call = tail call i32 @ReturnInt() nounwind
-  store i32 %call, i32* %a, align 4
+  store i32 %call, ptr %a, align 4
   ret void
 }
 
@@ -123,22 +123,22 @@ entry:
 
 
 ; Check that we generate PHIs for shadow.
-define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
+define void @FuncWithPhi(ptr nocapture %a, ptr %b, ptr nocapture %c) nounwind uwtable sanitize_memory {
 entry:
-  %tobool = icmp eq i32* %b, null
+  %tobool = icmp eq ptr %b, null
   br i1 %tobool, label %if.else, label %if.then
 
   if.then:                                          ; preds = %entry
-  %0 = load i32, i32* %b, align 4
+  %0 = load i32, ptr %b, align 4
   br label %if.end
 
   if.else:                                          ; preds = %entry
-  %1 = load i32, i32* %c, align 4
+  %1 = load i32, ptr %c, align 4
   br label %if.end
 
   if.end:                                           ; preds = %if.else, %if.then
   %t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
-  store i32 %t.0, i32* %a, align 4
+  store i32 %t.0, ptr %a, align 4
   ret void
 }
 
@@ -152,11 +152,11 @@ entry:
 ; CHECK: ret void
 
 ; Compute shadow for "x << 10"
-define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
+define void @ShlConst(ptr nocapture %x) nounwind uwtable sanitize_memory {
 entry:
-  %0 = load i32, i32* %x, align 4
+  %0 = load i32, ptr %x, align 4
   %1 = shl i32 %0, 10
-  store i32 %1, i32* %x, align 4
+  store i32 %1, ptr %x, align 4
   ret void
 }
 
@@ -170,11 +170,11 @@ entry:
 ; CHECK: ret void
 
 ; Compute shadow for "10 << x": it should have 'sext i1'.
-define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
+define void @ShlNonConst(ptr nocapture %x) nounwind uwtable sanitize_memory {
 entry:
-  %0 = load i32, i32* %x, align 4
+  %0 = load i32, ptr %x, align 4
   %1 = shl i32 10, %0
-  store i32 %1, i32* %x, align 4
+  store i32 %1, ptr %x, align 4
   ret void
 }
 
@@ -187,11 +187,11 @@ entry:
 ; CHECK: ret void
 
 ; SExt
-define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
+define void @SExt(ptr nocapture %a, ptr nocapture %b) nounwind uwtable sanitize_memory {
 entry:
-  %0 = load i16, i16* %b, align 2
+  %0 = load i16, ptr %b, align 2
   %1 = sext i16 %0 to i32
-  store i32 %1, i32* %a, align 4
+  store i32 %1, ptr %a, align 4
   ret void
 }
 
@@ -206,69 +206,69 @@ entry:
 
 
 ; memset
-define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
+define void @MemSet(ptr nocapture %x) nounwind uwtable sanitize_memory {
 entry:
-  call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i1 false)
+  call void @llvm.memset.p0.i64(ptr %x, i8 42, i64 10, i1 false)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
 
 ; CHECK-LABEL: @MemSet
-; CHECK: call i8* @__msan_memset
+; CHECK: call ptr @__msan_memset
 ; CHECK: ret void
 
 
 ; memcpy
-define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
+define void @MemCpy(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %x, ptr %y, i64 10, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
 ; CHECK-LABEL: @MemCpy
-; CHECK: call i8* @__msan_memcpy
+; CHECK: call ptr @__msan_memcpy
 ; CHECK: ret void
 
 ; memset.inline
-define void @MemSetInline(i8* nocapture %x) nounwind uwtable sanitize_memory {
+define void @MemSetInline(ptr nocapture %x) nounwind uwtable sanitize_memory {
 entry:
-  call void @llvm.memset.inline.p0i8.i64(i8* %x, i8 42, i64 10, i1 false)
+  call void @llvm.memset.inline.p0.i64(ptr %x, i8 42, i64 10, i1 false)
   ret void
 }
 
-declare void @llvm.memset.inline.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind
 
 ; CHECK-LABEL: @MemSetInline
-; CHECK: call i8* @__msan_memset
+; CHECK: call ptr @__msan_memset
 ; CHECK: ret void
 
 ; memcpy.inline
-define void @MemCpyInline(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
+define void @MemCpyInline(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
 entry:
-  call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false)
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr %x, ptr %y, i64 10, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
 ; CHECK-LABEL: @MemCpyInline
-; CHECK: call i8* @__msan_memcpy
+; CHECK: call ptr @__msan_memcpy
 ; CHECK: ret void
 
 ; memmove is lowered to a call
-define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
+define void @MemMove(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
 entry:
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %x, ptr %y, i64 10, i1 false)
   ret void
 }
 
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
 ; CHECK-LABEL: @MemMove
-; CHECK: call i8* @__msan_memmove
+; CHECK: call ptr @__msan_memmove
 ; CHECK: ret void
 
 ;; ------------
@@ -276,34 +276,34 @@ declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
 ;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
 ;; verify that MSAN handles these intrinsics properly once they have been
 ;; added to that class hierarchy.
-declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
-declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
-declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture writeonly, i8, i64, i32) nounwind
+declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32) nounwind
 
-define void @atomic_memcpy(i8* nocapture %x, i8* nocapture %y) nounwind {
+define void @atomic_memcpy(ptr nocapture %x, ptr nocapture %y) nounwind {
   ; CHECK-LABEL: atomic_memcpy
   ; CHECK-NEXT: call void @llvm.donothing
-  ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
+  ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1)
   ; CHECK-NEXT: ret void
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
+  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1)
   ret void
 }
 
-define void @atomic_memmove(i8* nocapture %x, i8* nocapture %y) nounwind {
+define void @atomic_memmove(ptr nocapture %x, ptr nocapture %y) nounwind {
   ; CHECK-LABEL: atomic_memmove
   ; CHECK-NEXT: call void @llvm.donothing
-  ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
+  ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1)
   ; CHECK-NEXT: ret void
-  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
+  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1)
   ret void
 }
 
-define void @atomic_memset(i8* nocapture %x) nounwind {
+define void @atomic_memset(ptr nocapture %x) nounwind {
   ; CHECK-LABEL: atomic_memset
   ; CHECK-NEXT: call void @llvm.donothing
-  ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
+  ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %x, i8 88, i64 16, i32 1)
   ; CHECK-NEXT: ret void
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %x, i8 88, i64 16, i32 1)
   ret void
 }
 
@@ -393,10 +393,10 @@ entry:
 ; CHECK: ret { i64, i64 }
 
 
-define { i64*, double } @SelectStruct2(i1 zeroext %x, { i64*, double } %a, { i64*, double } %b) readnone sanitize_memory {
+define { ptr, double } @SelectStruct2(i1 zeroext %x, { ptr, double } %a, { ptr, double } %b) readnone sanitize_memory {
 entry:
-  %c = select i1 %x, { i64*, double } %a, { i64*, double } %b
-  ret { i64*, double } %c
+  %c = select i1 %x, { ptr, double } %a, { ptr, double } %b
+  ret { ptr, double } %c
 }
 
 ; CHECK-LABEL: @SelectStruct2
@@ -404,37 +404,37 @@ entry:
 ; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
 ; ORIGINS: select i1
 ; ORIGINS: select i1
-; CHECK-NEXT: select i1 {{.*}}, { i64*, double }
-; CHECK: ret { i64*, double }
+; CHECK-NEXT: select i1 {{.*}}, { ptr, double }
+; CHECK: ret { ptr, double }
 
 
-define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
+define ptr @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
 entry:
-  %0 = inttoptr i64 %x to i8*
-  ret i8* %0
+  %0 = inttoptr i64 %x to ptr
+  ret ptr %0
 }
 
 ; CHECK-LABEL: @IntToPtr
-; CHECK: load i64, i64*{{.*}}__msan_param_tls
-; ORIGINS-NEXT: load i32, i32*{{.*}}__msan_param_origin_tls
+; CHECK: load i64, ptr{{.*}}__msan_param_tls
+; ORIGINS-NEXT: load i32, ptr{{.*}}__msan_param_origin_tls
 ; CHECK-NEXT: call void @llvm.donothing
 ; CHECK-NEXT: inttoptr
 ; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
-; CHECK: ret i8*
+; CHECK: ret ptr
 
 
-define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
+define ptr @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
 entry:
-  %0 = inttoptr i16 %x to i8*
-  ret i8* %0
+  %0 = inttoptr i16 %x to ptr
+  ret ptr %0
 }
 
 ; CHECK-LABEL: @IntToPtr_ZExt
-; CHECK: load i16, i16*{{.*}}__msan_param_tls
+; CHECK: load i16, ptr{{.*}}__msan_param_tls
 ; CHECK: zext
 ; CHECK-NEXT: inttoptr
 ; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
-; CHECK: ret i8*
+; CHECK: ret ptr
 
 
 ; Check that we insert exactly one check on udiv
@@ -468,7 +468,7 @@ entry:
 ; CHECK: %[[SB:.*]] = load i32,{{.*}}@__msan_param_tls
 ; CHECK: %[[SC:.*]] = or i32 %[[SA]], %[[SB]]
 ; CHECK: = fdiv float
-; CHECK: store i32 %[[SC]], i32* {{.*}}@__msan_retval_tls
+; CHECK: store i32 %[[SC]], ptr {{.*}}@__msan_retval_tls
 ; CHECK: ret float
 
 ; Check that fneg simply propagates shadow.
@@ -483,7 +483,7 @@ entry:
 ; CHECK: %[[SA:.*]] = load i32,{{.*}}@__msan_param_tls
 ; ORIGINS: %[[SB:.*]] = load i32,{{.*}}@__msan_param_origin_tls
 ; CHECK: = fneg float
-; CHECK: store i32 %[[SA]], i32* {{.*}}@__msan_retval_tls
+; CHECK: store i32 %[[SA]], ptr {{.*}}@__msan_retval_tls
 ; ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
 ; CHECK: ret float
 
@@ -592,15 +592,15 @@ define zeroext i1 @ICmpSLEAllOnes(i32 %x) nounwind uwtable readnone sanitize_mem
 ; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
 ; of the vector arguments.
 
-define <2 x i1> @ICmpSLT_vector_Zero(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
-  %1 = icmp slt <2 x i32*> %x, zeroinitializer
+define <2 x i1> @ICmpSLT_vector_Zero(<2 x ptr> %x) nounwind uwtable readnone sanitize_memory {
+  %1 = icmp slt <2 x ptr> %x, zeroinitializer
   ret <2 x i1> %1
 }
 
 ; CHECK-LABEL: @ICmpSLT_vector_Zero
 ; CHECK: icmp slt <2 x i64>
 ; CHECK-NOT: call void @__msan_warning
-; CHECK: icmp slt <2 x i32*>
+; CHECK: icmp slt <2 x ptr>
 ; CHECK-NOT: call void @__msan_warning
 ; CHECK: ret <2 x i1>
 
@@ -644,25 +644,25 @@ entry:
 
 define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
   %y = alloca i32, align 64
-  %1 = load volatile i32, i32* %y, align 64
+  %1 = load volatile i32, ptr %y, align 64
   ret i32 %1
 }
 
 ; CHECK-LABEL: @ShadowLoadAlignmentLarge
-; CHECK: load volatile i32, i32* {{.*}} align 64
-; CHECK: load i32, i32* {{.*}} align 64
+; CHECK: load volatile i32, ptr {{.*}} align 64
+; CHECK: load i32, ptr {{.*}} align 64
 ; CHECK: ret i32
 
 define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
   %y = alloca i32, align 2
-  %1 = load volatile i32, i32* %y, align 2
+  %1 = load volatile i32, ptr %y, align 2
   ret i32 %1
 }
 
 ; CHECK-LABEL: @ShadowLoadAlignmentSmall
-; CHECK: load volatile i32, i32* {{.*}} align 2
-; CHECK: load i32, i32* {{.*}} align 2
-; ORIGINS: load i32, i32* {{.*}} align 4
+; CHECK: load volatile i32, ptr {{.*}} align 2
+; CHECK: load i32, ptr {{.*}} align 2
+; ORIGINS: load i32, ptr {{.*}} align 4
 ; CHECK: ret i32
 
 
@@ -726,46 +726,44 @@ declare i32 @llvm.bswap.i32(i32) nounwind readnone
 ; Test handling of vectors of pointers.
 ; Check that shadow of such vector is a vector of integers.
 
-define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
-  %x = load <8 x i8*>, <8 x i8*>* %p
-  ret <8 x i8*> %x
+define <8 x ptr> @VectorOfPointers(ptr %p) nounwind uwtable sanitize_memory {
+  %x = load <8 x ptr>, ptr %p
+  ret <8 x ptr> %x
 }
 
 ; CHECK-LABEL: @VectorOfPointers
-; CHECK: load <8 x i8*>, <8 x i8*>*
-; CHECK: load <8 x i64>, <8 x i64>*
+; CHECK: load <8 x ptr>, ptr
+; CHECK: load <8 x i64>, ptr
 ; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
-; CHECK: ret <8 x i8*>
+; CHECK: ret <8 x ptr>
 
 ; Test handling of va_copy.
 
-declare void @llvm.va_copy(i8*, i8*) nounwind
+declare void @llvm.va_copy(ptr, ptr) nounwind
 
-define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
-  call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
+define void @VACopy(ptr %p1, ptr %p2) nounwind uwtable sanitize_memory {
+  call void @llvm.va_copy(ptr %p1, ptr %p2) nounwind
   ret void
 }
 
 ; CHECK-LABEL: @VACopy
-; CHECK: call void @llvm.memset.p0i8.i64({{.*}}, i8 0, i64 24, i1 false)
+; CHECK: call void @llvm.memset.p0.i64({{.*}}, i8 0, i64 24, i1 false)
 ; CHECK: ret void
 
 
 ; Test that va_start instrumentation does not use va_arg_tls*.
 ; It should work with a local stack copy instead.
 
-%struct.__va_list_tag = type { i32, i32, i8*, i8* }
-declare void @llvm.va_start(i8*) nounwind
+%struct.__va_list_tag = type { i32, i32, ptr, ptr }
+declare void @llvm.va_start(ptr) nounwind
 
 ; Function Attrs: nounwind uwtable
 define void @VAStart(i32 %x, ...) sanitize_memory {
 entry:
   %x.addr = alloca i32, align 4
   %va = alloca [1 x %struct.__va_list_tag], align 16
-  store i32 %x, i32* %x.addr, align 4
-  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i32 0, i32 0
-  %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
-  call void @llvm.va_start(i8* %arraydecay1)
+  store i32 %x, ptr %x.addr, align 4
+  call void @llvm.va_start(ptr %va)
   ret void
 }
 
@@ -779,9 +777,9 @@ entry:
 ; Test handling of volatile stores.
 ; Check that MemorySanitizer does not add a check of the value being stored.
 
-define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
+define void @VolatileStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
 entry:
-  store volatile i32 %x, i32* %p, align 4
+  store volatile i32 %x, ptr %p, align 4
   ret void
 }
 
@@ -821,15 +819,15 @@ declare void @bar()
 define i32 @NoSanitizeMemoryAlloca() {
 entry:
   %p = alloca i32, align 4
-  %x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
+  %x = call i32 @NoSanitizeMemoryAllocaHelper(ptr %p)
   ret i32 %x
 }
 
-declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
+declare i32 @NoSanitizeMemoryAllocaHelper(ptr %p)
 
 ; CHECK-LABEL: @NoSanitizeMemoryAlloca
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 0, i64 4, i1 false)
-; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(i32*
+; CHECK: call void @llvm.memset.p0.i64(ptr align 4 {{.*}}, i8 0, i64 4, i1 false)
+; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(ptr
 ; CHECK: ret i32
 
 
@@ -845,7 +843,7 @@ entry:
 declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)
 
 ; CHECK-LABEL: @NoSanitizeMemoryUndef
-; CHECK: store i32 0, i32* {{.*}} @__msan_param_tls
+; CHECK: store i32 0, ptr @__msan_param_tls
 ; CHECK: call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
 ; CHECK: ret i32
 
@@ -869,7 +867,7 @@ cond.end:                                         ; preds = %cond.false, %cond.t
 }
 
 ; CHECK: [[A:%.*]] = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
-; CHECK: store i32 0, i32* bitcast {{.*}} @__msan_retval_tls
+; CHECK: store i32 0, ptr @__msan_retval_tls
 ; CHECK: ret i32 [[A]]
 
 
@@ -877,9 +875,9 @@ cond.end:                                         ; preds = %cond.false, %cond.t
 ; argument shadow is a compile-time zero constant (which is always the case
 ; in functions missing sanitize_memory attribute).
 
-define i32 @NoSanitizeMemoryParamTLS(i32* nocapture readonly %x) {
+define i32 @NoSanitizeMemoryParamTLS(ptr nocapture readonly %x) {
 entry:
-  %0 = load i32, i32* %x, align 4
+  %0 = load i32, ptr %x, align 4
   %call = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 %0)
   ret i32 %call
 }
@@ -899,8 +897,8 @@ entry:
 }
 
 ; CHECK-LABEL: @ArgumentShadowAlignment
-; CHECK: load <2 x i64>, <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
-; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
+; CHECK: load <2 x i64>, ptr {{.*}} @__msan_param_tls {{.*}}, align 8
+; CHECK: store <2 x i64> {{.*}}, ptr @__msan_retval_tls, align 8
 ; CHECK: ret <2 x i64>
 
 
@@ -937,18 +935,14 @@ entry:
 
 declare void @VAArgStructFn(i32 %guard, ...)
 
-define void @VAArgStruct(%struct.StructByVal* nocapture %s) sanitize_memory {
+define void @VAArgStruct(ptr nocapture %s) sanitize_memory {
 entry:
   %agg.tmp2 = alloca %struct.StructByVal, align 8
-  %0 = bitcast %struct.StructByVal* %s to i8*
-  %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
-  %agg.tmp.sroa.0.0.copyload = load i64, i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
-  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
-  %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
-  %agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
-  %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %0, i64 16, i1 false)
-  call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval(%struct.StructByVal) align 8 %agg.tmp2)
+  %agg.tmp.sroa.0.0.copyload = load i64, ptr %s, align 4
+  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, ptr %s, i64 0, i32 2
+  %agg.tmp.sroa.2.0.copyload = load i64, ptr %agg.tmp.sroa.2.0..sroa_idx, align 4
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %agg.tmp2, ptr align 4 %s, i64 16, i1 false)
+  call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, ptr byval(%struct.StructByVal) align 8 %agg.tmp2)
   ret void
 }
 
@@ -958,38 +952,34 @@ entry:
 ; CHECK-LABEL: @VAArgStruct
 ; undef not stored to __msan_va_arg_tls - it's a fixed argument
 ; first struct through general purpose registers
-; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 8){{.*}}, align 8
-; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 16){{.*}}, align 8
+; CHECK: store i64 {{.*}}, ptr {{.*}}@__msan_va_arg_tls{{.*}}, i64 8){{.*}}, align 8
+; CHECK: store i64 {{.*}}, ptr {{.*}}@__msan_va_arg_tls{{.*}}, i64 16){{.*}}, align 8
 ; second struct through general purpose registers
-; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 24){{.*}}, align 8
-; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 32){{.*}}, align 8
+; CHECK: store i64 {{.*}}, ptr {{.*}}@__msan_va_arg_tls{{.*}}, i64 24){{.*}}, align 8
+; CHECK: store i64 {{.*}}, ptr {{.*}}@__msan_va_arg_tls{{.*}}, i64 32){{.*}}, align 8
 ; third struct through the overflow area byval
-; CHECK: ptrtoint %struct.StructByVal* {{.*}} to i64
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64{{.*}}@__msan_va_arg_tls {{.*}}, i64 176
-; CHECK: store i64 16, i64* @__msan_va_arg_overflow_size_tls
+; CHECK: ptrtoint ptr {{.*}} to i64
+; CHECK: call void @llvm.memcpy.p0.p0.i64{{.*}}@__msan_va_arg_tls {{.*}}, i64 176
+; CHECK: store i64 16, ptr @__msan_va_arg_overflow_size_tls
 ; CHECK: call void (i32, ...) @VAArgStructFn
 ; CHECK: ret void
 
 ; Same code compiled without SSE (see attributes below).
 ; The register save area is only 48 bytes instead of 176.
-define void @VAArgStructNoSSE(%struct.StructByVal* nocapture %s) sanitize_memory #0 {
+define void @VAArgStructNoSSE(ptr nocapture %s) sanitize_memory #0 {
 entry:
   %agg.tmp2 = alloca %struct.StructByVal, align 8
-  %0 = bitcast %struct.StructByVal* %s to i8*
-  %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
-  %agg.tmp.sroa.0.0.copyload = load i64, i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
-  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
-  %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
-  %agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
-  %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %0, i64 16, i1 false)
-  call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval(%struct.StructByVal) align 8 %agg.tmp2)
+  %agg.tmp.sroa.0.0.copyload = load i64, ptr %s, align 4
+  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, ptr %s, i64 0, i32 2
+  %agg.tmp.sroa.2.0.copyload = load i64, ptr %agg.tmp.sroa.2.0..sroa_idx, align 4
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %agg.tmp2, ptr align 4 %s, i64 16, i1 false)
+  call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, ptr byval(%struct.StructByVal) align 8 %agg.tmp2)
   ret void
 }
 
 attributes #0 = { "target-features"="+fxsr,+x87,-sse" }
 
-; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64{{.*}}@__msan_va_arg_tls {{.*}}, i64 48
+; CHECK: call void @llvm.memcpy.p0.p0.i64{{.*}}@__msan_va_arg_tls {{.*}}, i64 48
 
 declare i32 @InnerTailCall(i32 %a)
 
@@ -1022,35 +1012,33 @@ define i32 @CallMustTailCall(i32 %a) sanitize_memory {
 ; No instrumentation between call and ret.
 ; CHECK-NEXT: ret i32
 
-declare i32* @MismatchingMustTailCall(i32 %a)
+declare ptr @MismatchingMustTailCall(i32 %a)
 
-define i8* @MismatchingCallMustTailCall(i32 %a) sanitize_memory {
-  %b = musttail call i32* @MismatchingMustTailCall(i32 %a)
-  %c = bitcast i32* %b to i8*
-  ret i8* %c
+define ptr @MismatchingCallMustTailCall(i32 %a) sanitize_memory {
+  %b = musttail call ptr @MismatchingMustTailCall(i32 %a)
+  ret ptr %b
 }
 
 ; For "musttail" calls we can not insert any shadow manipulating code between
 ; call and the return instruction. And we don't need to, because everything is
 ; taken care of in the callee.
 
-; CHECK-LABEL: define i8* @MismatchingCallMustTailCall
-; CHECK: musttail call i32* @MismatchingMustTailCall
+; CHECK-LABEL: define ptr @MismatchingCallMustTailCall
+; CHECK: musttail call ptr @MismatchingMustTailCall
 ; No instrumentation between call and ret.
-; CHECK-NEXT: bitcast i32* {{.*}} to i8*
-; CHECK-NEXT: ret i8*
+; CHECK-NEXT: ret ptr
 
 
 ; CHECK-LABEL: define internal void @msan.module_ctor() #[[#ATTR:]] {
 ; CHECK: call void @__msan_init()
 
 ; CHECK-CALLS: declare void @__msan_maybe_warning_1(i8 zeroext, i32 zeroext)
-; CHECK-CALLS: declare void @__msan_maybe_store_origin_1(i8 zeroext, i8*, i32 zeroext)
+; CHECK-CALLS: declare void @__msan_maybe_store_origin_1(i8 zeroext, ptr, i32 zeroext)
 ; CHECK-CALLS: declare void @__msan_maybe_warning_2(i16 zeroext, i32 zeroext)
-; CHECK-CALLS: declare void @__msan_maybe_store_origin_2(i16 zeroext, i8*, i32 zeroext)
+; CHECK-CALLS: declare void @__msan_maybe_store_origin_2(i16 zeroext, ptr, i32 zeroext)
 ; CHECK-CALLS: declare void @__msan_maybe_warning_4(i32 zeroext, i32 zeroext)
-; CHECK-CALLS: declare void @__msan_maybe_store_origin_4(i32 zeroext, i8*, i32 zeroext)
+; CHECK-CALLS: declare void @__msan_maybe_store_origin_4(i32 zeroext, ptr, i32 zeroext)
 ; CHECK-CALLS: declare void @__msan_maybe_warning_8(i64 zeroext, i32 zeroext)
-; CHECK-CALLS: declare void @__msan_maybe_store_origin_8(i64 zeroext, i8*, i32 zeroext)
+; CHECK-CALLS: declare void @__msan_maybe_store_origin_8(i64 zeroext, ptr, i32 zeroext)
 
 ; CHECK:       attributes #[[#ATTR]] = { nounwind }

diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
index 8089703e811a0..3b4ef9245bb08 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
@@ -19,46 +19,45 @@ target triple = "x86_64-unknown-linux-gnu"
 !14 = !DILocation(line: 9, column: 4, scope: !2)
 !15 = !DILocation(line: 9, column: 5, scope: !2)
 
-define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
+define void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @Store(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8, !dbg [[DBG1:![0-9]+]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i32*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1:![0-9]+]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint i32* [[P:%.*]] to i64, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to i32*, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[TMP5]], 17592186044416, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to i32*, !dbg [[DBG1]]
-; CHECK-NEXT:    store i32 [[TMP2]], i32* [[TMP6]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[P]] to i8*, !dbg [[DBG1]]
-; CHECK-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP2]], i8* [[TMP9]], i32 zeroext [[TMP3]]), !dbg [[DBG1]]
-; CHECK-NEXT:    store i32 [[X:%.*]], i32* [[P]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr, !dbg [[DBG1]]
+; CHECK-NEXT:    store i32 [[TMP2]], ptr [[TMP6]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP2]], ptr [[P]], i32 zeroext [[TMP3]]), !dbg [[DBG1]]
+; CHECK-NEXT:    store i32 [[X:%.*]], ptr [[P]], align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  store i32 %x, i32* %p, align 4, !dbg !10
+  store i32 %x, ptr %p, align 4, !dbg !10
   ret void
 }
 
-define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
+define void @LoadAndCmp(ptr nocapture %a) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @LoadAndCmp(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A:%.*]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i32* [[A]] to i64, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A:%.*]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i32*, !dbg [[DBG1]]
-; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, i32* [[TMP5]], align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG1]]
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP5]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP9:%.*]] = xor i32 [[TMP2]], 0, !dbg [[DBG7:![0-9]+]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = or i32 [[_MSLD]], 0, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0, !dbg [[DBG7]]
@@ -71,14 +70,14 @@ define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
 ; CHECK-NEXT:    call void @__msan_maybe_warning_1(i8 zeroext [[TMP15]], i32 zeroext [[TMP8]]), !dbg [[DBG8]]
 ; CHECK-NEXT:    br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]], !dbg [[DBG8]]
 ; CHECK:       if.then:
-; CHECK-NEXT:    store i64 0, i64* @__msan_va_arg_overflow_size_tls, align 8
+; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
 ; CHECK-NEXT:    tail call void (...) @foo() #[[ATTR5:[0-9]+]]
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = load i32, i32* %a, align 4, !dbg !10
+  %0 = load i32, ptr %a, align 4, !dbg !10
   %tobool = icmp eq i32 %0, 0, !dbg !11
   br i1 %tobool, label %if.end, label %if.then, !dbg !12
 
@@ -94,196 +93,194 @@ define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
 ; CHECK-LABEL: @ReturnInt(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    ret i32 123, !dbg [[DBG1]]
 ;
 entry:
   ret i32 123, !dbg !10
 }
 
-define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
+define void @CopyRetVal(ptr nocapture %a) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @CopyRetVal(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 @ReturnInt() #[[ATTR5]], !dbg [[DBG1]]
-; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* @__msan_retval_origin_tls, align 4, !dbg [[DBG7]]
+; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8, !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4, !dbg [[DBG7]]
 ; CHECK-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i32* [[A:%.*]] to i64, !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[A:%.*]] to i64, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*, !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i32*, !dbg [[DBG7]]
-; CHECK-NEXT:    store i32 [[_MSRET]], i32* [[TMP5]], align 4, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[A]] to i8*, !dbg [[DBG7]]
-; CHECK-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSRET]], i8* [[TMP8]], i32 zeroext [[TMP2]]), !dbg [[DBG7]]
-; CHECK-NEXT:    store i32 [[CALL]], i32* [[A]], align 4, !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG7]]
+; CHECK-NEXT:    store i32 [[_MSRET]], ptr [[TMP5]], align 4, !dbg [[DBG7]]
+; CHECK-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSRET]], ptr [[A]], i32 zeroext [[TMP2]]), !dbg [[DBG7]]
+; CHECK-NEXT:    store i32 [[CALL]], ptr [[A]], align 4, !dbg [[DBG7]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %call = tail call i32 @ReturnInt() nounwind, !dbg !10
-  store i32 %call, i32* %a, align 4, !dbg !11
+  store i32 %call, ptr %a, align 4, !dbg !11
   ret void
 }
 
 
 
-define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
+define void @SExt(ptr nocapture %a, ptr nocapture %b) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @SExt(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i64*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load i16, i16* [[B:%.*]], align 2, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint i16* [[B]] to i64, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i16, ptr [[B:%.*]], align 2, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i16*, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = add i64 [[TMP6]], 17592186044416, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP9:%.*]] = and i64 [[TMP8]], -4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to i32*, !dbg [[DBG1]]
-; CHECK-NEXT:    [[_MSLD:%.*]] = load i16, i16* [[TMP7]], align 2, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP10]], align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr, !dbg [[DBG1]]
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i16, ptr [[TMP7]], align 2, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = sext i16 [[_MSLD]] to i32, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP12:%.*]] = sext i16 [[TMP4]] to i32, !dbg [[DBG7]]
 ; CHECK-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP2]], i32 zeroext [[TMP3]]), !dbg [[DBG8]]
-; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint i32* [[A:%.*]] to i64, !dbg [[DBG8]]
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A:%.*]] to i64, !dbg [[DBG8]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG8]]
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to i32*, !dbg [[DBG8]]
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr, !dbg [[DBG8]]
 ; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416, !dbg [[DBG8]]
-; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to i32*, !dbg [[DBG8]]
-; CHECK-NEXT:    store i32 [[_MSPROP]], i32* [[TMP15]], align 4, !dbg [[DBG8]]
-; CHECK-NEXT:    [[TMP18:%.*]] = bitcast i32* [[A]] to i8*, !dbg [[DBG8]]
-; CHECK-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSPROP]], i8* [[TMP18]], i32 zeroext [[TMP11]]), !dbg [[DBG8]]
-; CHECK-NEXT:    store i32 [[TMP12]], i32* [[A]], align 4, !dbg [[DBG8]]
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr, !dbg [[DBG8]]
+; CHECK-NEXT:    store i32 [[_MSPROP]], ptr [[TMP15]], align 4, !dbg [[DBG8]]
+; CHECK-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSPROP]], ptr [[A]], i32 zeroext [[TMP11]]), !dbg [[DBG8]]
+; CHECK-NEXT:    store i32 [[TMP12]], ptr [[A]], align 4, !dbg [[DBG8]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %0 = load i16, i16* %b, align 2, !dbg !10
+  %0 = load i16, ptr %b, align 2, !dbg !10
   %1 = sext i16 %0 to i32, !dbg !11
-  store i32 %1, i32* %a, align 4, !dbg !12
+  store i32 %1, ptr %a, align 4, !dbg !12
   ret void
 }
 
-define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
+define void @MemSet(ptr nocapture %x) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @MemSet(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call i8* @__msan_memset(i8* [[X:%.*]], i32 42, i64 10), !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X:%.*]], i32 42, i64 10), !dbg [[DBG1]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i1 false), !dbg !10
+  call void @llvm.memset.p0.i64(ptr %x, i8 42, i64 10, i1 false), !dbg !10
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
 
 
 
-define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
+define void @MemCpy(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @MemCpy(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i64*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call i8* @__msan_memcpy(i8* [[X:%.*]], i8* [[Y:%.*]], i64 10), !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG1]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false), !dbg !10
+  call void @llvm.memcpy.p0.p0.i64(ptr %x, ptr %y, i64 10, i1 false), !dbg !10
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
 
-define void @MemSetInline(i8* nocapture %x) nounwind uwtable sanitize_memory {
+define void @MemSetInline(ptr nocapture %x) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @MemSetInline(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call i8* @__msan_memset(i8* [[X:%.*]], i32 42, i64 10), !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X:%.*]], i32 42, i64 10), !dbg [[DBG1]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  call void @llvm.memset.inline.p0i8.i64(i8* %x, i8 42, i64 10, i1 false), !dbg !10
+  call void @llvm.memset.inline.p0.i64(ptr %x, i8 42, i64 10, i1 false), !dbg !10
   ret void
 }
 
-declare void @llvm.memset.inline.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind
 
 
-define void @MemCpyInline(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
+define void @MemCpyInline(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @MemCpyInline(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i64*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call i8* @__msan_memcpy(i8* [[X:%.*]], i8* [[Y:%.*]], i64 10), !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG1]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false), !dbg !10
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr %x, ptr %y, i64 10, i1 false), !dbg !10
   ret void
 }
 
-declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
 
-define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
+define void @MemMove(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @MemMove(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i64*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = call i8* @__msan_memmove(i8* [[X:%.*]], i8* [[Y:%.*]], i64 10), !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call ptr @__msan_memmove(ptr [[X:%.*]], ptr [[Y:%.*]], i64 10), !dbg [[DBG1]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i1 false), !dbg !10
+  call void @llvm.memmove.p0.p0.i64(ptr %x, ptr %y, i64 10, i1 false), !dbg !10
   ret void
 }
 
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
 
-declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
-declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
-declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
+declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture writeonly, i8, i64, i32) nounwind
+declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32) nounwind
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32) nounwind
 
-define void @atomic_memcpy(i8* nocapture %x, i8* nocapture %y) nounwind {
+define void @atomic_memcpy(ptr nocapture %x, ptr nocapture %y) nounwind {
 ; CHECK-LABEL: @atomic_memcpy(
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[X:%.*]], i8* align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG1]]
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X:%.*]], ptr align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG1]]
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1), !dbg !10
+  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1), !dbg !10
   ret void
 }
 
-define void @atomic_memmove(i8* nocapture %x, i8* nocapture %y) nounwind {
+define void @atomic_memmove(ptr nocapture %x, ptr nocapture %y) nounwind {
 ; CHECK-LABEL: @atomic_memmove(
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 [[X:%.*]], i8* align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG1]]
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X:%.*]], ptr align 2 [[Y:%.*]], i64 16, i32 1), !dbg [[DBG1]]
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1), !dbg !10
+  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1), !dbg !10
   ret void
 }
 
-define void @atomic_memset(i8* nocapture %x) nounwind {
+define void @atomic_memset(ptr nocapture %x) nounwind {
 ; CHECK-LABEL: @atomic_memset(
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 [[X:%.*]], i8 88, i64 16, i32 1), !dbg [[DBG1]]
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 [[X:%.*]], i8 88, i64 16, i32 1), !dbg [[DBG1]]
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1), !dbg !10
+  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %x, i8 88, i64 16, i32 1), !dbg !10
   ret void
 }
 
@@ -293,12 +290,12 @@ define void @atomic_memset(i8* nocapture %x) nounwind {
 define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory {
 ; CHECK-LABEL: @Select(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i1, i1* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i1*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 16) to i32*), align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i32*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = select i1 [[C:%.*]], i32 [[TMP2]], i32 [[TMP4]], !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = xor i32 [[A:%.*]], [[B:%.*]], !dbg [[DBG1]]
@@ -308,8 +305,8 @@ define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_mem
 ; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[C]], i32 [[TMP3]], i32 [[TMP5]], !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP10]], !dbg [[DBG1]]
 ; CHECK-NEXT:    [[COND:%.*]] = select i1 [[C]], i32 [[A]], i32 [[B]], !dbg [[DBG1]]
-; CHECK-NEXT:    store i32 [[_MSPROP_SELECT]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT:    store i32 [[TMP11]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store i32 [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[TMP11]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT:    ret i32 [[COND]]
 ;
 entry:
@@ -323,12 +320,12 @@ entry:
 define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
 ; CHECK-LABEL: @SelectVector(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i1>, <8 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i1>*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 32) to i32*), align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <8 x i16>*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 16) to i32*), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = select <8 x i1> [[C:%.*]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]], !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = xor <8 x i16> [[A:%.*]], [[B:%.*]], !dbg [[DBG1]]
@@ -342,8 +339,8 @@ define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind
 ; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP11]], i32 [[TMP3]], i32 [[TMP5]], !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP13]], i32 [[TMP1]], i32 [[TMP14]], !dbg [[DBG1]]
 ; CHECK-NEXT:    [[COND:%.*]] = select <8 x i1> [[C]], <8 x i16> [[A]], <8 x i16> [[B]], !dbg [[DBG1]]
-; CHECK-NEXT:    store <8 x i16> [[_MSPROP_SELECT]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
-; CHECK-NEXT:    store i32 [[TMP15]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store <8 x i16> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[TMP15]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT:    ret <8 x i16> [[COND]]
 ;
 entry:
@@ -354,20 +351,20 @@ entry:
 
 
 
-define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
+define ptr @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
 ; CHECK-LABEL: @IntToPtr(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[X:%.*]] to i8*, !dbg [[DBG1]]
-; CHECK-NEXT:    store i64 [[TMP0]], i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_retval_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
-; CHECK-NEXT:    ret i8* [[TMP2]]
+; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[X:%.*]] to ptr, !dbg [[DBG1]]
+; CHECK-NEXT:    store i64 [[TMP0]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    ret ptr [[TMP2]]
 ;
 entry:
-  %0 = inttoptr i64 %x to i8*, !dbg !10
-  ret i8* %0
+  %0 = inttoptr i64 %x to ptr, !dbg !10
+  ret ptr %0
 }
 
 
@@ -377,15 +374,15 @@ entry:
 define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
 ; CHECK-LABEL: @Div(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i32*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @__msan_maybe_warning_4(i32 zeroext [[TMP0]], i32 zeroext [[TMP1]]), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[DIV:%.*]] = udiv i32 [[A:%.*]], [[B:%.*]], !dbg [[DBG1]]
-; CHECK-NEXT:    store i32 [[TMP2]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT:    store i32 [[TMP3]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[TMP3]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT:    ret i32 [[DIV]]
 ;
 entry:
@@ -403,29 +400,28 @@ define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @ShadowLoadAlignmentLarge(
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[Y:%.*]] = alloca i32, align 64, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[Y]] to i64, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to i8*, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], -4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to i32*, !dbg [[DBG1]]
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 64 [[TMP3]], i8 -1, i64 4, i1 false), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[Y]] to i8*, !dbg [[DBG1]]
-; CHECK-NEXT:    call void @__msan_set_alloca_origin_with_descr(i8* [[TMP7]], i64 4, i8* bitcast (i32* @[[GLOB0:[0-9]+]] to i8*), i8* getelementptr inbounds ([2 x i8], [2 x i8]* @[[GLOB1:[0-9]+]], i32 0, i32 0)), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = load volatile i32, i32* [[Y]], align 64, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint i32* [[Y]] to i64, !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr, !dbg [[DBG1]]
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 64 [[TMP3]], i8 -1, i64 4, i1 false), !dbg [[DBG1]]
+; CHECK-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[Y]], i64 4, ptr @[[GLOB0:[0-9]+]], ptr @[[GLOB1:[0-9]+]]), !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP8:%.*]] = load volatile i32, ptr [[Y]], align 64, !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[Y]] to i64, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to i32*, !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 17592186044416, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to i32*, !dbg [[DBG7]]
-; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, i32* [[TMP11]], align 64, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 64, !dbg [[DBG7]]
-; CHECK-NEXT:    store i32 [[_MSLD]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT:    store i32 [[TMP14]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr, !dbg [[DBG7]]
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP11]], align 64, !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 64, !dbg [[DBG7]]
+; CHECK-NEXT:    store i32 [[_MSLD]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[TMP14]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT:    ret i32 [[TMP8]]
 ;
   %y = alloca i32, align 64, !dbg !10
-  %1 = load volatile i32, i32* %y, align 64, !dbg !11
+  %1 = load volatile i32, ptr %y, align 64, !dbg !11
   ret i32 %1
 }
 
@@ -433,16 +429,16 @@ define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
 
 define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
 ; CHECK-LABEL: @ExtractElement(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i32*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 16) to i32*), align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP3]], i32 [[IDX:%.*]], !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]]), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[X:%.*]] = extractelement <4 x i32> [[VEC:%.*]], i32 [[IDX]], !dbg [[DBG1]]
-; CHECK-NEXT:    store i32 [[_MSPROP]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT:    store i32 [[TMP4]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store i32 [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[TMP4]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
   %x = extractelement <4 x i32> %vec, i32 %idx, !dbg !10
@@ -452,12 +448,12 @@ define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
 
 define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
 ; CHECK-LABEL: @InsertElement(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i32*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 16) to i32*), align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to i32*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 24) to i32*), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP5]], i32 [[IDX:%.*]], !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 0, !dbg [[DBG1]]
@@ -466,8 +462,8 @@ define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memor
 ; CHECK-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 [[TMP8]], !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]]), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[VEC1:%.*]] = insertelement <4 x i32> [[VEC:%.*]], i32 [[X:%.*]], i32 [[IDX]], !dbg [[DBG1]]
-; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    store i32 [[TMP10]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[TMP10]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT:    ret <4 x i32> [[VEC1]]
 ;
   %vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx, !dbg !10
@@ -477,18 +473,18 @@ define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memor
 
 define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
 ; CHECK-LABEL: @ShuffleVector(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 16) to i32*), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i128 [[TMP5]], 0, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP4]], i32 [[TMP2]], !dbg [[DBG1]]
 ; CHECK-NEXT:    [[VEC2:%.*]] = shufflevector <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>, !dbg [[DBG1]]
-; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    store i32 [[TMP7]], i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 [[TMP7]], ptr @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT:    ret <4 x i32> [[VEC2]]
 ;
   %vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>, !dbg !10
@@ -497,94 +493,85 @@ define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory
 
 
 
-%struct.__va_list_tag = type { i32, i32, i8*, i8* }
-declare void @llvm.va_start(i8*) nounwind
+%struct.__va_list_tag = type { i32, i32, ptr, ptr }
+declare void @llvm.va_start(ptr) nounwind
 
 define void @VAStart(i32 %x, ...) sanitize_memory {
 ; CHECK-LABEL: @VAStart(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* @__msan_va_arg_overflow_size_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]], !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 1, !dbg [[DBG1]]
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP2]], i8* align 8 bitcast ([100 x i64]* @__msan_va_arg_tls to i8*), i64 [[TMP1]], i1 false), !dbg [[DBG1]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP1]], i1 false), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = alloca i8, i64 [[TMP1]], align 1, !dbg [[DBG1]]
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP3]], i8* align 8 bitcast ([200 x i32]* @__msan_va_arg_origin_tls to i8*), i64 [[TMP1]], i1 false), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4, !dbg [[DBG1]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 @__msan_va_arg_origin_tls, i64 [[TMP1]], i1 false), !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4, !dbg [[DBG1]]
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint i32* [[X_ADDR]] to i64, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[X_ADDR]] to i64, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to i8*, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[TMP7]], 17592186044416, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = and i64 [[TMP9]], -4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to i32*, !dbg [[DBG1]]
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP8]], i8 -1, i64 4, i1 false), !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP12:%.*]] = bitcast i32* [[X_ADDR]] to i8*, !dbg [[DBG1]]
-; CHECK-NEXT:    call void @__msan_set_alloca_origin_with_descr(i8* [[TMP12]], i64 4, i8* bitcast (i32* @[[GLOB2:[0-9]+]] to i8*), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @[[GLOB3:[0-9]+]], i32 0, i32 0)), !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr, !dbg [[DBG1]]
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP8]], i8 -1, i64 4, i1 false), !dbg [[DBG1]]
+; CHECK-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[X_ADDR]], i64 4, ptr @[[GLOB2:[0-9]+]], ptr @[[GLOB3:[0-9]+]]), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[VA:%.*]] = alloca [1 x %struct.__va_list_tag], align 16, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint [1 x %struct.__va_list_tag]* [[VA]] to i64, !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to i8*, !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP17:%.*]] = and i64 [[TMP16]], -4, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to i32*, !dbg [[DBG7]]
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 16 [[TMP15]], i8 -1, i64 24, i1 false), !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP19:%.*]] = bitcast [1 x %struct.__va_list_tag]* [[VA]] to i8*, !dbg [[DBG7]]
-; CHECK-NEXT:    call void @__msan_set_alloca_origin_with_descr(i8* [[TMP19]], i64 24, i8* bitcast (i32* @[[GLOB4:[0-9]+]] to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @[[GLOB5:[0-9]+]], i32 0, i32 0)), !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP20:%.*]] = ptrtoint i32* [[X_ADDR]] to i64, !dbg [[DBG8]]
+; CHECK-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr, !dbg [[DBG7]]
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP15]], i8 -1, i64 24, i1 false), !dbg [[DBG7]]
+; CHECK-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[VA]], i64 24, ptr @[[GLOB4:[0-9]+]], ptr @[[GLOB5:[0-9]+]]), !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP20:%.*]] = ptrtoint ptr [[X_ADDR]] to i64, !dbg [[DBG8]]
 ; CHECK-NEXT:    [[TMP21:%.*]] = xor i64 [[TMP20]], 87960930222080, !dbg [[DBG8]]
-; CHECK-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to i32*, !dbg [[DBG8]]
+; CHECK-NEXT:    [[TMP22:%.*]] = inttoptr i64 [[TMP21]] to ptr, !dbg [[DBG8]]
 ; CHECK-NEXT:    [[TMP23:%.*]] = add i64 [[TMP21]], 17592186044416, !dbg [[DBG8]]
-; CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to i32*, !dbg [[DBG8]]
-; CHECK-NEXT:    store i32 [[TMP4]], i32* [[TMP22]], align 4, !dbg [[DBG8]]
-; CHECK-NEXT:    [[TMP25:%.*]] = bitcast i32* [[X_ADDR]] to i8*, !dbg [[DBG8]]
-; CHECK-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP4]], i8* [[TMP25]], i32 zeroext [[TMP5]]), !dbg [[DBG8]]
-; CHECK-NEXT:    store i32 [[X:%.*]], i32* [[X_ADDR]], align 4, !dbg [[DBG8]]
-; CHECK-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* [[VA]], i32 0, i32 0, !dbg [[DBG9:![0-9]+]]
-; CHECK-NEXT:    [[ARRAYDECAY1:%.*]] = bitcast %struct.__va_list_tag* [[ARRAYDECAY]] to i8*, !dbg [[DBG10:![0-9]+]]
-; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint i8* [[ARRAYDECAY1]] to i64, !dbg [[DBG11:![0-9]+]]
+; CHECK-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr, !dbg [[DBG8]]
+; CHECK-NEXT:    store i32 [[TMP4]], ptr [[TMP22]], align 4, !dbg [[DBG8]]
+; CHECK-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP4]], ptr [[X_ADDR]], i32 zeroext [[TMP5]]), !dbg [[DBG8]]
+; CHECK-NEXT:    store i32 [[X:%.*]], ptr [[X_ADDR]], align 4, !dbg [[DBG8]]
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG11:![0-9]+]]
 ; CHECK-NEXT:    [[TMP27:%.*]] = xor i64 [[TMP26]], 87960930222080, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to i8*, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr, !dbg [[DBG11]]
 ; CHECK-NEXT:    [[TMP29:%.*]] = add i64 [[TMP27]], 17592186044416, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to i32*, !dbg [[DBG11]]
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP28]], i8 0, i64 24, i1 false), !dbg [[DBG11]]
-; CHECK-NEXT:    call void @llvm.va_start(i8* [[ARRAYDECAY1]]), !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP31:%.*]] = ptrtoint i8* [[ARRAYDECAY1]] to i64, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr, !dbg [[DBG11]]
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 24, i1 false), !dbg [[DBG11]]
+; CHECK-NEXT:    call void @llvm.va_start(ptr [[VA]]), !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP31:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG11]]
 ; CHECK-NEXT:    [[TMP32:%.*]] = add i64 [[TMP31]], 16, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to i64**, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP34:%.*]] = load i64*, i64** [[TMP33]], align 8, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP35:%.*]] = ptrtoint i64* [[TMP34]] to i64, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP35:%.*]] = ptrtoint ptr [[TMP34]] to i64, !dbg [[DBG11]]
 ; CHECK-NEXT:    [[TMP36:%.*]] = xor i64 [[TMP35]], 87960930222080, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP37:%.*]] = inttoptr i64 [[TMP36]] to i8*, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP37:%.*]] = inttoptr i64 [[TMP36]] to ptr, !dbg [[DBG11]]
 ; CHECK-NEXT:    [[TMP38:%.*]] = add i64 [[TMP36]], 17592186044416, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP39:%.*]] = inttoptr i64 [[TMP38]] to i32*, !dbg [[DBG11]]
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP37]], i8* align 16 [[TMP2]], i64 176, i1 false), !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP40:%.*]] = bitcast i32* [[TMP39]] to i8*, !dbg [[DBG11]]
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP40]], i8* align 16 [[TMP3]], i64 176, i1 false), !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP41:%.*]] = ptrtoint i8* [[ARRAYDECAY1]] to i64, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP39:%.*]] = inttoptr i64 [[TMP38]] to ptr, !dbg [[DBG11]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 16 [[TMP2]], i64 176, i1 false), !dbg [[DBG11]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP39]], ptr align 16 [[TMP3]], i64 176, i1 false), !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP41:%.*]] = ptrtoint ptr [[VA]] to i64, !dbg [[DBG11]]
 ; CHECK-NEXT:    [[TMP42:%.*]] = add i64 [[TMP41]], 8, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP43:%.*]] = inttoptr i64 [[TMP42]] to i64**, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP44:%.*]] = load i64*, i64** [[TMP43]], align 8, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP45:%.*]] = ptrtoint i64* [[TMP44]] to i64, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP43:%.*]] = inttoptr i64 [[TMP42]] to ptr, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP44:%.*]] = load ptr, ptr [[TMP43]], align 8, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP45:%.*]] = ptrtoint ptr [[TMP44]] to i64, !dbg [[DBG11]]
 ; CHECK-NEXT:    [[TMP46:%.*]] = xor i64 [[TMP45]], 87960930222080, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP47:%.*]] = inttoptr i64 [[TMP46]] to i8*, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP47:%.*]] = inttoptr i64 [[TMP46]] to ptr, !dbg [[DBG11]]
 ; CHECK-NEXT:    [[TMP48:%.*]] = add i64 [[TMP46]], 17592186044416, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP49:%.*]] = inttoptr i64 [[TMP48]] to i32*, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP50:%.*]] = getelementptr i8, i8* [[TMP2]], i32 176, !dbg [[DBG11]]
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP47]], i8* align 16 [[TMP50]], i64 [[TMP0]], i1 false), !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP51:%.*]] = getelementptr i8, i8* [[TMP3]], i32 176, !dbg [[DBG11]]
-; CHECK-NEXT:    [[TMP52:%.*]] = bitcast i32* [[TMP49]] to i8*, !dbg [[DBG11]]
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP52]], i8* align 16 [[TMP51]], i64 [[TMP0]], i1 false), !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP49:%.*]] = inttoptr i64 [[TMP48]] to ptr, !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP50:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176, !dbg [[DBG11]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP47]], ptr align 16 [[TMP50]], i64 [[TMP0]], i1 false), !dbg [[DBG11]]
+; CHECK-NEXT:    [[TMP51:%.*]] = getelementptr i8, ptr [[TMP3]], i32 176, !dbg [[DBG11]]
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP49]], ptr align 16 [[TMP51]], i64 [[TMP0]], i1 false), !dbg [[DBG11]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %x.addr = alloca i32, align 4, !dbg !10
   %va = alloca [1 x %struct.__va_list_tag], align 16, !dbg !11
-  store i32 %x, i32* %x.addr, align 4, !dbg !12
-  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i32 0, i32 0, !dbg !13
-  %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*, !dbg !14
-  call void @llvm.va_start(i8* %arraydecay1), !dbg !15
+  store i32 %x, ptr %x.addr, align 4, !dbg !12
+  call void @llvm.va_start(ptr %va), !dbg !15
   ret void
 }
 
@@ -604,8 +591,8 @@ define i32 @NoSanitizeMemory(i32 %x) uwtable {
 ; CHECK-NEXT:    tail call void @bar(), !dbg [[DBG8]]
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
 entry:
@@ -628,29 +615,29 @@ define i32 @NoSanitizeMemoryAlloca() {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[P:%.*]] = alloca i32, align 4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i32* [[P]] to i64, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[P]] to i64, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to i8*, !dbg [[DBG1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], -4, !dbg [[DBG1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*, !dbg [[DBG1]]
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 4 [[TMP2]], i8 0, i64 4, i1 false), !dbg [[DBG1]]
-; CHECK-NEXT:    store i64 0, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8, !dbg [[DBG7]]
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8, !dbg [[DBG7]]
-; CHECK-NEXT:    [[X:%.*]] = call i32 @NoSanitizeMemoryAllocaHelper(i32* [[P]]), !dbg [[DBG7]]
-; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* @__msan_retval_origin_tls, align 4
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG1]]
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP2]], i8 0, i64 4, i1 false), !dbg [[DBG1]]
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG7]]
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG7]]
+; CHECK-NEXT:    [[X:%.*]] = call i32 @NoSanitizeMemoryAllocaHelper(ptr [[P]]), !dbg [[DBG7]]
+; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
 entry:
   %p = alloca i32, align 4, !dbg !10
-  %x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p), !dbg !11
+  %x = call i32 @NoSanitizeMemoryAllocaHelper(ptr %p), !dbg !11
   ret i32 %x
 }
 
-declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
+declare i32 @NoSanitizeMemoryAllocaHelper(ptr %p)
 
 
 
@@ -659,13 +646,13 @@ define i32 @NoSanitizeMemoryUndef() {
 ; CHECK-LABEL: @NoSanitizeMemoryUndef(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8, !dbg [[DBG1]]
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    store i32 0, ptr @__msan_param_tls, align 8, !dbg [[DBG1]]
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8, !dbg [[DBG1]]
 ; CHECK-NEXT:    [[X:%.*]] = call i32 @NoSanitizeMemoryUndefHelper(i32 undef), !dbg [[DBG1]]
-; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* @__msan_retval_origin_tls, align 4
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT:    ret i32 [[X]]
 ;
 entry:
@@ -675,9 +662,9 @@ entry:
 
 declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg %0, i8* nocapture %1)
-declare void @llvm.lifetime.end.p0i8(i64 immarg %0, i8* nocapture %1)
-declare void @foo8(i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg %0, ptr nocapture %1)
+declare void @llvm.lifetime.end.p0(i64 immarg %0, ptr nocapture %1)
+declare void @foo8(ptr nocapture)
 
 
 define void @msan() sanitize_memory {
@@ -685,24 +672,24 @@ define void @msan() sanitize_memory {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call void @llvm.donothing(), !dbg [[DBG1]]
 ; CHECK-NEXT:    [[TEXT:%.*]] = alloca i8, align 1, !dbg [[DBG1]]
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0i8(i64 1, i8* [[TEXT]]), !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint i8* [[TEXT]] to i64, !dbg [[DBG7]]
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 1, ptr [[TEXT]]), !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[TEXT]] to i64, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to i8*, !dbg [[DBG7]]
+; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416, !dbg [[DBG7]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], -4, !dbg [[DBG7]]
-; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*, !dbg [[DBG7]]
-; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 1 [[TMP2]], i8 -1, i64 1, i1 false), !dbg [[DBG7]]
-; CHECK-NEXT:    call void @__msan_set_alloca_origin_with_descr(i8* [[TEXT]], i64 1, i8* bitcast (i32* @[[GLOB6:[0-9]+]] to i8*), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @[[GLOB7:[0-9]+]], i32 0, i32 0)), !dbg [[DBG7]]
-; CHECK-NEXT:    store i64 0, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8, !dbg [[DBG8]]
-; CHECK-NEXT:    call void @foo8(i8* [[TEXT]]), !dbg [[DBG8]]
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0i8(i64 1, i8* [[TEXT]]), !dbg [[DBG9]]
-; CHECK-NEXT:    ret void, !dbg [[DBG10]]
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr, !dbg [[DBG7]]
+; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 1 [[TMP2]], i8 -1, i64 1, i1 false), !dbg [[DBG7]]
+; CHECK-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[TEXT]], i64 1, ptr @[[GLOB6:[0-9]+]], ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG7]]
+; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG8]]
+; CHECK-NEXT:    call void @foo8(ptr [[TEXT]]), !dbg [[DBG8]]
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 1, ptr [[TEXT]]), !dbg
+; CHECK-NEXT:    ret void, !dbg
 ;
 entry:
   %text = alloca i8, align 1, !dbg !10
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %text), !dbg !11
-  call void @foo8(i8* %text), !dbg !12
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %text), !dbg !13
+  call void @llvm.lifetime.start.p0(i64 1, ptr %text), !dbg !11
+  call void @foo8(ptr %text), !dbg !12
+  call void @llvm.lifetime.end.p0(i64 1, ptr %text), !dbg !13
   ret void, !dbg !14
 }

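(The msan_debug_info.ll fixups above are all instances of one mechanical
pattern: with typed pointers, reaching into the MSan TLS arrays took
bitcast/getelementptr constant expressions, while with opaque pointers the
global is usable directly at any access type. A minimal before/after sketch,
reusing @__msan_param_tls from the tests above; %s1/%s2 are placeholder
names, not from the commit:

  @__msan_param_tls = external thread_local global [100 x i64]

  ; typed pointers: cast the [100 x i64] array global to the element type
  %s1 = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
  ; opaque pointers: the load's result type carries the information, no cast
  %s2 = load i32, ptr @__msan_param_tls, align 8

Accesses at a non-zero offset keep their inttoptr/add constant-expression
form, as seen above; only the pointee spellings collapse to 'ptr'.)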
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
index 10fe12ba5f51d..30ab3280bdece 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_eager.ll
@@ -18,8 +18,8 @@ define noundef i32 @NormalRet() nounwind uwtable sanitize_memory {
 define i32 @PartialRet() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @PartialRet(
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
 ; CHECK-NEXT:    ret i32 123
 ;
   ret i32 123
@@ -28,15 +28,15 @@ define i32 @PartialRet() nounwind uwtable sanitize_memory {
 define noundef i32 @LoadedRet() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @LoadedRet(
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to i32*
-; CHECK-NEXT:    [[O:%.*]] = load i32, i32* [[P]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
+; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to ptr
+; CHECK-NEXT:    [[O:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
-; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to i32*
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
 ; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
-; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, i32* [[TMP3]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0:![0-9]+]]
 ; CHECK:       7:
@@ -45,8 +45,8 @@ define noundef i32 @LoadedRet() nounwind uwtable sanitize_memory {
 ; CHECK:       8:
 ; CHECK-NEXT:    ret i32 [[O]]
 ;
-  %p = inttoptr i64 0 to i32 *
-  %o = load i32, i32 *%p
+  %p = inttoptr i64 0 to ptr
+  %o = load i32, ptr %p
   ret i32 %o
 }
 
@@ -54,70 +54,70 @@ define noundef i32 @LoadedRet() nounwind uwtable sanitize_memory {
 define void @NormalArg(i32 noundef %a) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @NormalArg(
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to i32*
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
+; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to ptr
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
-; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to i32*
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
 ; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
-; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
-; CHECK-NEXT:    store i32 0, i32* [[TMP3]], align 4
-; CHECK-NEXT:    store i32 [[A:%.*]], i32* [[P]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    store i32 0, ptr [[TMP3]], align 4
+; CHECK-NEXT:    store i32 [[A:%.*]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %p = inttoptr i64 0 to i32 *
-  store i32 %a, i32 *%p
+  %p = inttoptr i64 0 to ptr
+  store i32 %a, ptr %p
   ret void
 }
 
 define void @NormalArgAfterNoUndef(i32 noundef %a, i32 %b) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @NormalArgAfterNoUndef(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i32*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to i32*
-; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i32* [[P]] to i64
+; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to ptr
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
-; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
 ; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i32*
-; CHECK-NEXT:    store i32 [[TMP1]], i32* [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i32 [[TMP1]], ptr [[TMP5]], align 4
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    store i32 [[TMP2]], i32* [[TMP7]], align 4
+; CHECK-NEXT:    store i32 [[TMP2]], ptr [[TMP7]], align 4
 ; CHECK-NEXT:    br label [[TMP9]]
 ; CHECK:       9:
-; CHECK-NEXT:    store i32 [[B:%.*]], i32* [[P]], align 4
+; CHECK-NEXT:    store i32 [[B:%.*]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %p = inttoptr i64 0 to i32 *
-  store i32 %b, i32 *%p
+  %p = inttoptr i64 0 to ptr
+  store i32 %b, ptr %p
   ret void
 }
 
 define void @PartialArg(i32 %a) nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @PartialArg(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to i32*
-; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint i32* [[P]] to i64
+; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to ptr
+; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
-; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
 ; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i32*
-; CHECK-NEXT:    store i32 [[TMP1]], i32* [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    store i32 [[TMP1]], ptr [[TMP5]], align 4
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
 ; CHECK:       8:
-; CHECK-NEXT:    store i32 [[TMP2]], i32* [[TMP7]], align 4
+; CHECK-NEXT:    store i32 [[TMP2]], ptr [[TMP7]], align 4
 ; CHECK-NEXT:    br label [[TMP9]]
 ; CHECK:       9:
-; CHECK-NEXT:    store i32 [[A:%.*]], i32* [[P]], align 4
+; CHECK-NEXT:    store i32 [[A:%.*]], ptr [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %p = inttoptr i64 0 to i32 *
-  store i32 %a, i32 *%p
+  %p = inttoptr i64 0 to ptr
+  store i32 %a, ptr %p
   ret void
 }
 
@@ -137,7 +137,7 @@ define void @CallNormalArgAfterNoUndef() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @CallNormalArgAfterNoUndef(
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @NormalRet() #[[ATTR0]]
-; CHECK-NEXT:    store i32 0, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @NormalArgAfterNoUndef(i32 [[R]], i32 [[R]]) #[[ATTR0]]
 ; CHECK-NEXT:    ret void
 ;
@@ -149,15 +149,15 @@ define void @CallNormalArgAfterNoUndef() nounwind uwtable sanitize_memory {
 define void @CallWithLoaded() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @CallWithLoaded(
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to i32*
-; CHECK-NEXT:    [[O:%.*]] = load i32, i32* [[P]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[P]] to i64
+; CHECK-NEXT:    [[P:%.*]] = inttoptr i64 0 to ptr
+; CHECK-NEXT:    [[O:%.*]] = load i32, ptr [[P]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
-; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to i32*
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
 ; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
-; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, i32* [[TMP3]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[_MSLD]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
 ; CHECK:       7:
@@ -167,8 +167,8 @@ define void @CallWithLoaded() nounwind uwtable sanitize_memory {
 ; CHECK-NEXT:    call void @NormalArg(i32 [[O]]) #[[ATTR0]]
 ; CHECK-NEXT:    ret void
 ;
-  %p = inttoptr i64 0 to i32 *
-  %o = load i32, i32 *%p
+  %p = inttoptr i64 0 to ptr
+  %o = load i32, ptr %p
   call void @NormalArg(i32 %o) nounwind uwtable sanitize_memory
   ret void
 }
@@ -176,12 +176,12 @@ define void @CallWithLoaded() nounwind uwtable sanitize_memory {
 define void @CallPartial() nounwind uwtable sanitize_memory {
 ; CHECK-LABEL: @CallPartial(
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @PartialRet() #[[ATTR0]]
-; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @__msan_retval_origin_tls, align 4
-; CHECK-NEXT:    store i32 [[_MSRET]], i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
-; CHECK-NEXT:    store i32 [[TMP1]], i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
+; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
+; CHECK-NEXT:    store i32 [[_MSRET]], ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    store i32 [[TMP1]], ptr @__msan_param_origin_tls, align 4
 ; CHECK-NEXT:    call void @PartialArg(i32 [[R]]) #[[ATTR0]]
 ; CHECK-NEXT:    ret void
 ;

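(The msan_eager.ll changes are purely type spelling: every element-typed
pointer becomes 'ptr', while the shadow/origin address arithmetic the CHECK
lines match is byte-for-byte identical. For reference, the mapping those
load checks encode, written out with placeholder names (%app, %sh, %org,
%shp, %orp are illustrative, not from the test; %p is any application
pointer):

  %app = ptrtoint ptr %p to i64
  %sh  = xor i64 %app, 87960930222080     ; 0x500000000000: shadow address
  %org = add i64 %sh, 17592186044416      ; 0x100000000000: origin offset
  %shp = inttoptr i64 %sh to ptr
  %orp = inttoptr i64 %org to ptr
)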
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
index ef8fabc86ee84..f4fbf5fa4b118 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_kernel_basic.ll
@@ -31,9 +31,9 @@ entry:
 
 ; Check instrumentation of stores
 
-define void @Store1(i8* nocapture %p, i8 %x) nounwind uwtable sanitize_memory {
+define void @Store1(ptr nocapture %p, i8 %x) nounwind uwtable sanitize_memory {
 entry:
-  store i8 %x, i8* %p
+  store i8 %x, ptr %p
   ret void
 }
 
@@ -43,13 +43,13 @@ entry:
 ; CHECK: [[PARAM_SHADOW:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 0
 ; CHECK: [[BASE:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
 ; CHECK: [[SHADOW_PTR:%[a-z0-9_]+]] = inttoptr {{.*}} [[BASE]]
-; CHECK: [[SHADOW:%[a-z0-9]+]] = load i64, i64* [[SHADOW_PTR]]
+; CHECK: [[SHADOW:%[a-z0-9]+]] = load i64, ptr [[SHADOW_PTR]]
 ; CHECK: [[BASE2:%[0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
 ; Load the shadow of %p and check it
 ; CHECK: icmp ne i64 [[SHADOW]]
 ; CHECK: br i1
 ; CHECK: {{^[0-9]+}}:
-; CHECK: @__msan_metadata_ptr_for_store_1(i8* %p)
+; CHECK: @__msan_metadata_ptr_for_store_1(ptr %p)
 ; CHECK: store i8
 ; If the new shadow is non-zero, jump to __msan_chain_origin()
 ; CHECK: icmp
@@ -63,9 +63,9 @@ entry:
 ; CHECK: store i8
 ; CHECK: ret void
 
-define void @Store2(i16* nocapture %p, i16 %x) nounwind uwtable sanitize_memory {
+define void @Store2(ptr nocapture %p, i16 %x) nounwind uwtable sanitize_memory {
 entry:
-  store i16 %x, i16* %p
+  store i16 %x, ptr %p
   ret void
 }
 
@@ -79,8 +79,7 @@ entry:
 ; CHECK: icmp
 ; CHECK: br i1
 ; CHECK: {{^[0-9]+}}:
-; CHECK: [[REG:%[0-9]+]] = bitcast i16* %p to i8*
-; CHECK: @__msan_metadata_ptr_for_store_2(i8* [[REG]])
+; CHECK: @__msan_metadata_ptr_for_store_2(ptr %p)
 ; CHECK: store i16
 ; If the new shadow is non-zero, jump to __msan_chain_origin()
 ; CHECK: icmp
@@ -95,9 +94,9 @@ entry:
 ; CHECK: ret void
 
 
-define void @Store4(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
+define void @Store4(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
 entry:
-  store i32 %x, i32* %p
+  store i32 %x, ptr %p
   ret void
 }
 
@@ -111,8 +110,7 @@ entry:
 ; CHECK: icmp
 ; CHECK: br i1
 ; CHECK: {{^[0-9]+}}:
-; CHECK: [[REG:%[0-9]+]] = bitcast i32* %p to i8*
-; CHECK: @__msan_metadata_ptr_for_store_4(i8* [[REG]])
+; CHECK: @__msan_metadata_ptr_for_store_4(ptr %p)
 ; CHECK: store i32
 ; If the new shadow is non-zero, jump to __msan_chain_origin()
 ; CHECK: icmp
@@ -126,9 +124,9 @@ entry:
 ; CHECK: store i32
 ; CHECK: ret void
 
-define void @Store8(i64* nocapture %p, i64 %x) nounwind uwtable sanitize_memory {
+define void @Store8(ptr nocapture %p, i64 %x) nounwind uwtable sanitize_memory {
 entry:
-  store i64 %x, i64* %p
+  store i64 %x, ptr %p
   ret void
 }
 
@@ -142,8 +140,7 @@ entry:
 ; CHECK: icmp
 ; CHECK: br i1
 ; CHECK: {{^[0-9]+}}:
-; CHECK: [[REG:%[0-9]+]] = bitcast i64* %p to i8*
-; CHECK: @__msan_metadata_ptr_for_store_8(i8* [[REG]])
+; CHECK: @__msan_metadata_ptr_for_store_8(ptr %p)
 ; CHECK: store i64
 ; If the new shadow is non-zero, jump to __msan_chain_origin()
 ; CHECK: icmp
@@ -157,9 +154,9 @@ entry:
 ; CHECK: store i64
 ; CHECK: ret void
 
-define void @Store16(i128* nocapture %p, i128 %x) nounwind uwtable sanitize_memory {
+define void @Store16(ptr nocapture %p, i128 %x) nounwind uwtable sanitize_memory {
 entry:
-  store i128 %x, i128* %p
+  store i128 %x, ptr %p
   ret void
 }
 
@@ -173,8 +170,7 @@ entry:
 ; CHECK: icmp
 ; CHECK: br i1
 ; CHECK: {{^[0-9]+}}:
-; CHECK: [[REG:%[0-9]+]] = bitcast i128* %p to i8*
-; CHECK: @__msan_metadata_ptr_for_store_n(i8* [[REG]], i64 16)
+; CHECK: @__msan_metadata_ptr_for_store_n(ptr %p, i64 16)
 ; CHECK: store i128
 ; If the new shadow is non-zero, jump to __msan_chain_origin()
 ; CHECK: icmp
@@ -191,9 +187,9 @@ entry:
 
 ; Check instrumentation of loads
 
-define i8 @Load1(i8* nocapture %p) nounwind uwtable sanitize_memory {
+define i8 @Load1(ptr nocapture %p) nounwind uwtable sanitize_memory {
 entry:
-  %0 = load i8, i8* %p
+  %0 = load i8, ptr %p
   ret i8 %0
 }
 
@@ -210,15 +206,15 @@ entry:
 ; Load the value from %p. This is done before accessing the shadow
 ; to ease atomic handling.
 ; CHECK: load i8
-; CHECK: @__msan_metadata_ptr_for_load_1(i8* %p)
+; CHECK: @__msan_metadata_ptr_for_load_1(ptr %p)
 ; Load the shadow and origin.
 ; CHECK: load i8
 ; CHECK: load i32
 
 
-define i16 @Load2(i16* nocapture %p) nounwind uwtable sanitize_memory {
+define i16 @Load2(ptr nocapture %p) nounwind uwtable sanitize_memory {
 entry:
-  %0 = load i16, i16* %p
+  %0 = load i16, ptr %p
   ret i16 %0
 }
 
@@ -235,16 +231,15 @@ entry:
 ; Load the value from %p. This is done before accessing the shadow
 ; to ease atomic handling.
 ; CHECK: load i16
-; CHECK: [[REG:%[0-9]+]] = bitcast i16* %p to i8*
-; CHECK: @__msan_metadata_ptr_for_load_2(i8* [[REG]])
+; CHECK: @__msan_metadata_ptr_for_load_2(ptr %p)
 ; Load the shadow and origin.
 ; CHECK: load i16
 ; CHECK: load i32
 
 
-define i32 @Load4(i32* nocapture %p) nounwind uwtable sanitize_memory {
+define i32 @Load4(ptr nocapture %p) nounwind uwtable sanitize_memory {
 entry:
-  %0 = load i32, i32* %p
+  %0 = load i32, ptr %p
   ret i32 %0
 }
 
@@ -261,15 +256,14 @@ entry:
 ; Load the value from %p. This is done before accessing the shadow
 ; to ease atomic handling.
 ; CHECK: load i32
-; CHECK: [[REG:%[0-9]+]] = bitcast i32* %p to i8*
-; CHECK: @__msan_metadata_ptr_for_load_4(i8* [[REG]])
+; CHECK: @__msan_metadata_ptr_for_load_4(ptr %p)
 ; Load the shadow and origin.
 ; CHECK: load i32
 ; CHECK: load i32
 
-define i64 @Load8(i64* nocapture %p) nounwind uwtable sanitize_memory {
+define i64 @Load8(ptr nocapture %p) nounwind uwtable sanitize_memory {
 entry:
-  %0 = load i64, i64* %p
+  %0 = load i64, ptr %p
   ret i64 %0
 }
 
@@ -286,15 +280,14 @@ entry:
 ; Load the value from %p. This is done before accessing the shadow
 ; to ease atomic handling.
 ; CHECK: load i64
-; CHECK: [[REG:%[0-9]+]] = bitcast i64* %p to i8*
-; CHECK: @__msan_metadata_ptr_for_load_8(i8* [[REG]])
+; CHECK: @__msan_metadata_ptr_for_load_8(ptr %p)
 ; Load the shadow and origin.
 ; CHECK: load i64
 ; CHECK: load i32
 
-define i128 @Load16(i128* nocapture %p) nounwind uwtable sanitize_memory {
+define i128 @Load16(ptr nocapture %p) nounwind uwtable sanitize_memory {
 entry:
-  %0 = load i128, i128* %p
+  %0 = load i128, ptr %p
   ret i128 %0
 }
 
@@ -311,8 +304,7 @@ entry:
 ; Load the value from %p. This is done before accessing the shadow
 ; to ease atomic handling.
 ; CHECK: load i128
-; CHECK: [[REG:%[0-9]+]] = bitcast i128* %p to i8*
-; CHECK: @__msan_metadata_ptr_for_load_n(i8* [[REG]], i64 16)
+; CHECK: @__msan_metadata_ptr_for_load_n(ptr %p, i64 16)
 ; Load the shadow and origin.
 ; CHECK: load i128
 ; CHECK: load i32
@@ -320,21 +312,19 @@ entry:
 
 ; Test kernel-specific va_list instrumentation
 
-%struct.__va_list_tag = type { i32, i32, i8*, i8* }
-declare void @llvm.va_start(i8*) nounwind
-declare void @llvm.va_end(i8*)
+%struct.__va_list_tag = type { i32, i32, ptr, ptr }
+declare void @llvm.va_start(ptr) nounwind
+declare void @llvm.va_end(ptr)
 @.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
-declare dso_local i32 @VAListFn(i8*, %struct.__va_list_tag*) local_unnamed_addr
+declare dso_local i32 @VAListFn(ptr, ptr) local_unnamed_addr
 
 ; Function Attrs: nounwind uwtable
-define dso_local i32 @VarArgFn(i8* %fmt, ...) local_unnamed_addr sanitize_memory #0 {
+define dso_local i32 @VarArgFn(ptr %fmt, ...) local_unnamed_addr sanitize_memory #0 {
 entry:
   %args = alloca [1 x %struct.__va_list_tag], align 16
-  %0 = bitcast [1 x %struct.__va_list_tag]* %args to i8*
-  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %args, i64 0, i64 0
-  call void @llvm.va_start(i8* nonnull %0)
-  %call = call i32 @VAListFn(i8* %fmt, %struct.__va_list_tag* nonnull %arraydecay)
-  call void @llvm.va_end(i8* nonnull %0)
+  call void @llvm.va_start(ptr nonnull %args)
+  %call = call i32 @VAListFn(ptr %fmt, ptr nonnull %args)
+  call void @llvm.va_end(ptr nonnull %args)
   ret i32 %call
 }
 
@@ -347,21 +337,19 @@ attributes #0 = { "target-features"="+fxsr,+x87,-sse" }
 ; CHECK: [[VA_ARG_ORIGIN:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 3
 ; CHECK: [[VA_ARG_OVERFLOW_SIZE:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 4
 
-; CHECK: [[OSIZE:%[0-9]+]] = load i64, i64* [[VA_ARG_OVERFLOW_SIZE]]
+; CHECK: [[OSIZE:%[0-9]+]] = load i64, ptr [[VA_ARG_OVERFLOW_SIZE]]
 ; Register save area is 48 bytes for non-SSE builds.
 ; CHECK: [[SIZE:%[0-9]+]] = add i64 48, [[OSIZE]]
 ; CHECK: [[SHADOWS:%[0-9]+]] = alloca i8, i64 [[SIZE]]
-; CHECK: [[VA_ARG_SHADOW]]
-; CHECK: call void @llvm.memcpy{{.*}}(i8* align 8 [[SHADOWS]], {{.*}}, i64 [[SIZE]]
+; CHECK: call void @llvm.memcpy{{.*}}(ptr align 8 [[SHADOWS]], ptr align 8 [[VA_ARG_SHADOW]], i64 [[SIZE]]
 ; CHECK: [[ORIGINS:%[0-9]+]] = alloca i8, i64 [[SIZE]]
-; CHECK: [[VA_ARG_ORIGIN]]
-; CHECK: call void @llvm.memcpy{{.*}}(i8* align 8 [[ORIGINS]], {{.*}}, i64 [[SIZE]]
+; CHECK: call void @llvm.memcpy{{.*}}(ptr align 8 [[ORIGINS]], ptr align 8 [[VA_ARG_ORIGIN]], i64 [[SIZE]]
 ; CHECK: call i32 @VAListFn
 
 ; Function Attrs: nounwind uwtable
 define dso_local void @VarArgCaller() local_unnamed_addr sanitize_memory {
 entry:
-  %call = tail call i32 (i8*, ...) @VarArgFn(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 123)
+  %call = tail call i32 (ptr, ...) @VarArgFn(ptr @.str, i32 123)
   ret void
 }
 
@@ -374,19 +362,19 @@ entry:
 ; CHECK: [[VA_ARG_OVERFLOW_SIZE:%[a-z0-9_]+]] = getelementptr {{.*}} i32 0, i32 4
 
 ; CHECK: [[PARAM_SI:%[_a-z0-9]+]] = ptrtoint {{.*}} [[PARAM_SHADOW]]
-; CHECK: [[ARG1_S:%[_a-z0-9]+]] = inttoptr i64 [[PARAM_SI]] to i64*
+; CHECK: [[ARG1_S:%[_a-z0-9]+]] = inttoptr i64 [[PARAM_SI]] to ptr
 ; First argument is initialized
-; CHECK: store i64 0, i64* [[ARG1_S]]
+; CHECK: store i64 0, ptr [[ARG1_S]]
 
 ; Dangling cast of va_arg_shadow[0], unused because the first argument is fixed.
 ; CHECK: [[VA_CAST0:%[_a-z0-9]+]] = ptrtoint {{.*}} [[VA_ARG_SHADOW]] to i64
 
 ; CHECK: [[VA_CAST1:%[_a-z0-9]+]] = ptrtoint {{.*}} [[VA_ARG_SHADOW]] to i64
 ; CHECK: [[ARG1_SI:%[_a-z0-9]+]] = add i64 [[VA_CAST1]], 8
-; CHECK: [[PARG1_S:%[_a-z0-9]+]] = inttoptr i64 [[ARG1_SI]] to i32*
+; CHECK: [[PARG1_S:%[_a-z0-9]+]] = inttoptr i64 [[ARG1_SI]] to ptr
 
 ; Shadow for 123 is 0.
-; CHECK: store i32 0, i32* [[ARG1_S]]
+; CHECK: store i32 0, ptr [[ARG1_S]]
 
-; CHECK: store i64 0, i64* [[VA_ARG_OVERFLOW_SIZE]]
-; CHECK: call i32 (i8*, ...) @VarArgFn({{.*}} @.str{{.*}} i32 123)
+; CHECK: store i64 0, ptr [[VA_ARG_OVERFLOW_SIZE]]
+; CHECK: call i32 (ptr, ...) @VarArgFn({{.*}} @.str{{.*}} i32 123)
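
; A hand-written sketch (not pass output) of the kernel-mode shape the
; checks above describe; the callback name and { ptr, ptr } return type
; are taken from the hunks, the rest is illustrative:
;
;   declare { ptr, ptr } @__msan_metadata_ptr_for_load_4(ptr)
;
;   define i32 @sketch_kmsan_load(ptr %p) sanitize_memory {
;     %v = load i32, ptr %p
;     ; With opaque pointers the callback takes %p directly; the old
;     ; "bitcast i32* %p to i8*" disappears.
;     %meta = call { ptr, ptr } @__msan_metadata_ptr_for_load_4(ptr %p)
;     %sptr = extractvalue { ptr, ptr } %meta, 0
;     %optr = extractvalue { ptr, ptr } %meta, 1
;     %shadow = load i32, ptr %sptr
;     %origin = load i32, ptr %optr
;     ret i32 %v
;   }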

diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll
index 9f91b314487e2..2cc8fd645c596 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll
@@ -6,33 +6,31 @@
 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-%class.Foo = type { i32 (...)** }
+%class.Foo = type { ptr }
 @flag = dso_local local_unnamed_addr global i8 0, align 1
 
-define dso_local %class.Foo* @_Z1fv() local_unnamed_addr #0 {
+define dso_local ptr @_Z1fv() local_unnamed_addr #0 {
 entry:
-  %p = alloca i8*, align 8
-  %0 = bitcast i8** %p to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
-  %1 = load i8, i8* @flag, align 1
-  %tobool = icmp ne i8 %1, 0
-  %call = call zeroext i1 @_Z2f1PPvb(i8** nonnull %p, i1 zeroext %tobool)
-  %2 = load i8*, i8** %p, align 8
-  %3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %2)
-  %4 = bitcast i8* %3 to %class.Foo*
-  %retval.0 = select i1 %call, %class.Foo* %4, %class.Foo* null
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
-  ret %class.Foo* %retval.0
+  %p = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %p)
+  %0 = load i8, ptr @flag, align 1
+  %tobool = icmp ne i8 %0, 0
+  %call = call zeroext i1 @_Z2f1PPvb(ptr nonnull %p, i1 zeroext %tobool)
+  %1 = load ptr, ptr %p, align 8
+  %2 = call ptr @llvm.launder.invariant.group.p0(ptr %1)
+  %retval.0 = select i1 %call, ptr %2, ptr null
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %p)
+  ret ptr %retval.0
 }
 
 ; CHECK-NOT: call void @__msan_warning_with_origin_noreturn
 
-declare dso_local zeroext i1 @_Z2f1PPvb(i8**, i1 zeroext) local_unnamed_addr
+declare dso_local zeroext i1 @_Z2f1PPvb(ptr, i1 zeroext) local_unnamed_addr
 
-declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare ptr @llvm.launder.invariant.group.p0(ptr)
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 attributes #0 = { sanitize_memory uwtable }
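
; What the CHECK-NOT above asserts, as a hand-written illustration: the
; pass treats llvm.launder.invariant.group as shadow-transparent, so the
; laundered value is never itself a check point:
;
;   declare ptr @llvm.launder.invariant.group.p0(ptr)
;
;   define ptr @sketch_launder(ptr %p) sanitize_memory {
;     ; shadow(%q) is simply shadow(%p); no __msan_warning is inserted here.
;     %q = call ptr @llvm.launder.invariant.group.p0(ptr %p)
;     ret ptr %q
;   }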

diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_strip_invariant.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_strip_invariant.ll
index 6ea00e42c8f5f..3323947414565 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_strip_invariant.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_strip_invariant.ll
@@ -8,14 +8,14 @@ target triple = "x86_64-unknown-linux-gnu"
 
 @flag = dso_local local_unnamed_addr global i8 0, align 1
 
-define dso_local i8* @f(i8* %x) local_unnamed_addr #0 {
+define dso_local ptr @f(ptr %x) local_unnamed_addr #0 {
 entry:
-  %0 = call i8* @llvm.strip.invariant.group.p0i8(i8* %x)
-  ret i8* %0
+  %0 = call ptr @llvm.strip.invariant.group.p0(ptr %x)
+  ret ptr %0
 }
 
 ; CHECK-NOT: call void @__msan_warning_with_origin_noreturn
 
-declare i8* @llvm.strip.invariant.group.p0i8(i8*)
+declare ptr @llvm.strip.invariant.group.p0(ptr)
 
 attributes #0 = { sanitize_memory uwtable }

diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
index c3d03c6342ee7..5eeba197c7632 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
@@ -38,14 +38,14 @@ entry:
   %bit = alloca i8, align 1
   %value = alloca i64, align 8
   %nr = alloca i64, align 8
-  %addr = alloca i64*, align 8
-  store i32 0, i32* %retval, align 4
-  store i64 2, i64* %value, align 8
-  store i64 0, i64* %nr, align 8
-  store i64* %value, i64** %addr, align 8
-  %0 = load i64, i64* %nr, align 8
-  call void asm "btsq $2, $1; setc $0", "=*qm,=*m,Ir,~{dirflag},~{fpsr},~{flags}"(i8* elementtype(i8) %bit, i64** elementtype(i64*) %addr, i64 %0)
-  %1 = load i8, i8* %bit, align 1
+  %addr = alloca ptr, align 8
+  store i32 0, ptr %retval, align 4
+  store i64 2, ptr %value, align 8
+  store i64 0, ptr %nr, align 8
+  store ptr %value, ptr %addr, align 8
+  %0 = load i64, ptr %nr, align 8
+  call void asm "btsq $2, $1; setc $0", "=*qm,=*m,Ir,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i8) %bit, ptr elementtype(ptr) %addr, i64 %0)
+  %1 = load i8, ptr %bit, align 1
   %tobool = trunc i8 %1 to i1
   br i1 %tobool, label %if.then, label %if.else
 
@@ -57,20 +57,16 @@ if.else:                                          ; preds = %entry
 }
 
 ; %nr is first poisoned, then unpoisoned (written to). Need to optimize this in the future.
-; CHECK: [[NRC1:%.*]] = bitcast i64* %nr to i8*
-; CHECK: call void @__msan_poison_alloca(i8* [[NRC1]]{{.*}})
-; CHECK: [[NRC2:%.*]] = bitcast i64* %nr to i8*
-; CHECK: call { i8*, i32* } @__msan_metadata_ptr_for_store_8(i8* [[NRC2]])
+; CHECK: call void @__msan_poison_alloca(ptr %nr{{.*}})
+; CHECK: call { ptr, ptr } @__msan_metadata_ptr_for_store_8(ptr %nr)
 
 ; Hooks for inputs usually go before the assembly statement. But here we have none,
 ; because %nr is passed by value. However we check %nr for being initialized.
-; CHECK-CONS: [[NRC3:%.*]] = bitcast i64* %nr to i8*
-; CHECK-CONS: call { i8*, i32* } @__msan_metadata_ptr_for_load_8(i8* [[NRC3]])
+; CHECK-CONS: call { ptr, ptr } @__msan_metadata_ptr_for_load_8(ptr %nr)
 
 ; In the conservative mode, call the store hooks for %bit and %addr:
-; CHECK-CONS: call void @__msan_instrument_asm_store(i8* %bit, i64 1)
-; CHECK-CONS: [[ADDR8S:%.*]] = bitcast i64** %addr to i8*
-; CHECK-CONS: call void @__msan_instrument_asm_store(i8* [[ADDR8S]], i64 8)
+; CHECK-CONS: call void @__msan_instrument_asm_store(ptr %bit, i64 1)
+; CHECK-CONS: call void @__msan_instrument_asm_store(ptr %addr, i64 8)
 
 ; Landing pad for the %nr check above.
 ; CHECK-CONS: call void @__msan_warning
@@ -82,8 +78,8 @@ if.else:                                          ; preds = %entry
 ; CHECKz: [[SH_NUM:%.*]] = xor i64 [[PTR]]
 ; CHECKz: [[SHADOW:%.*]] = inttoptr i64 [[SH_NUM]] {{.*}}
 
-; CHECK: [[META:%.*]] = call {{.*}} @__msan_metadata_ptr_for_load_1(i8* %bit)
-; CHECK: [[SHADOW:%.*]] = extractvalue { i8*, i32* } [[META]], 0
+; CHECK: [[META:%.*]] = call {{.*}} @__msan_metadata_ptr_for_load_1(ptr %bit)
+; CHECK: [[SHADOW:%.*]] = extractvalue { ptr, ptr } [[META]], 0
 
 ; Now load the shadow value for the boolean.
 ; CHECK: [[MSLD:%.*]] = load {{.*}} [[SHADOW]]
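
; A hand-written sketch of the conservative-mode output pattern checked
; above, assuming the same __msan_instrument_asm_store signature:
;
;   declare void @__msan_instrument_asm_store(ptr, i64)
;
;   define void @sketch_asm_out(ptr %out) sanitize_memory {
;     ; Each indirect inline-asm output is unpoisoned after the statement;
;     ; with opaque pointers %out is passed as-is with its store size.
;     call void asm "movq $$0, $0", "=*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %out)
;     call void @__msan_instrument_asm_store(ptr %out, i64 8)
;     ret void
;   }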

diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
index 3bfd663d9dd5b..a83a94a06b98f 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_x86intrinsics.ll
@@ -10,38 +10,38 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; Store intrinsic.
 
-define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable sanitize_memory {
-  call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
+define void @StoreIntrinsic(ptr %p, <4 x float> %x) nounwind uwtable sanitize_memory {
+  call void @llvm.x86.sse.storeu.ps(ptr %p, <4 x float> %x)
   ret void
 }
 
-declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
+declare void @llvm.x86.sse.storeu.ps(ptr, <4 x float>) nounwind
 
 ; CHECK-LABEL: @StoreIntrinsic
 ; CHECK-NOT: br
 ; CHECK-NOT: = or
 ; CHECK: store <4 x i32> {{.*}} align 1
-; CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
+; CHECK: store <4 x float> %{{.*}}, ptr %{{.*}}, align 1{{$}}
 ; CHECK: ret void
 
 
 ; Load intrinsic.
 
-define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable sanitize_memory {
-  %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
+define <16 x i8> @LoadIntrinsic(ptr %p) nounwind uwtable sanitize_memory {
+  %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(ptr %p)
   ret <16 x i8> %call
 }
 
-declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind
+declare <16 x i8> @llvm.x86.sse3.ldu.dq(ptr %p) nounwind
 
 ; CHECK-LABEL: @LoadIntrinsic
-; CHECK: load <16 x i8>, <16 x i8>* {{.*}} align 1
-; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32, i32* {{.*}}
+; CHECK: load <16 x i8>, ptr {{.*}} align 1
+; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32, ptr {{.*}}
 ; CHECK-NOT: br
 ; CHECK-NOT: = or
 ; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
 ; CHECK: store <16 x i8> {{.*}} @__msan_retval_tls
-; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], i32* @__msan_retval_origin_tls
+; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], ptr @__msan_retval_origin_tls
 ; CHECK: ret <16 x i8>
 
 
@@ -57,10 +57,10 @@ define <8 x i16> @Pmulhuw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable saniti
 declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a, <8 x i16> %b) nounwind
 
 ; CHECK-LABEL: @Pmulhuw128
-; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
-; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
-; CHECK-NEXT: load <8 x i16>, <8 x i16>* {{.*}} @__msan_param_tls
-; CHECK-ORIGINS: load i32, i32* {{.*}} @__msan_param_origin_tls
+; CHECK-NEXT: load <8 x i16>, ptr @__msan_param_tls
+; CHECK-ORIGINS: load i32, ptr @__msan_param_origin_tls
+; CHECK-NEXT: load <8 x i16>, ptr {{.*}} @__msan_param_tls
+; CHECK-ORIGINS: load i32, ptr {{.*}} @__msan_param_origin_tls
 ; CHECK-NEXT: call void @llvm.donothing
 ; CHECK-NEXT: = or <8 x i16>
 ; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
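
; A hand-written sketch of the store-intrinsic handling checked above,
; using the usual x86-64 Linux shadow mapping (application address xor
; 0x500000000000); illustrative, not pass output:
;
;   declare void @llvm.x86.sse.storeu.ps(ptr, <4 x float>)
;
;   define void @sketch_storeu(ptr %p, <4 x float> %x, <4 x i32> %xshadow) sanitize_memory {
;     ; %xshadow stands in for the shadow of %x. The shadow is written to
;     ; the mapped address with align 1, mirroring the unaligned store.
;     %a = ptrtoint ptr %p to i64
;     %m = xor i64 %a, 87960930222080
;     %sp = inttoptr i64 %m to ptr
;     store <4 x i32> %xshadow, ptr %sp, align 1
;     call void @llvm.x86.sse.storeu.ps(ptr %p, <4 x float> %x)
;     ret void
;   }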

diff --git a/llvm/test/Instrumentation/MemorySanitizer/mul_by_constant.ll b/llvm/test/Instrumentation/MemorySanitizer/mul_by_constant.ll
index dfc9b46af445a..50a4a1b17df27 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/mul_by_constant.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/mul_by_constant.ll
@@ -19,7 +19,7 @@ entry:
 ; CHECK-LABEL: @MulConst(
 ; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
 ; CHECK: [[B:%.*]] = mul i64 [[A]], 68719476736
-; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+; CHECK: store i64 [[B]], ptr @__msan_retval_tls
 
 
 define i64 @MulZero(i64 %x) sanitize_memory {
@@ -31,7 +31,7 @@ entry:
 ; CHECK-LABEL: @MulZero(
 ; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
 ; CHECK: [[B:%.*]] = mul i64 [[A]], 0{{$}}
-; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+; CHECK: store i64 [[B]], ptr @__msan_retval_tls
 
 
 define i64 @MulNeg(i64 %x) sanitize_memory {
@@ -43,7 +43,7 @@ entry:
 ; CHECK-LABEL: @MulNeg(
 ; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
 ; CHECK: [[B:%.*]] = mul i64 [[A]], 16
-; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+; CHECK: store i64 [[B]], ptr @__msan_retval_tls
 
 
 define i64 @MulNeg2(i64 %x) sanitize_memory {
@@ -55,7 +55,7 @@ entry:
 ; CHECK-LABEL: @MulNeg2(
 ; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
 ; CHECK: [[B:%.*]] = mul i64 [[A]], 16
-; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+; CHECK: store i64 [[B]], ptr @__msan_retval_tls
 
 
 define i64 @MulOdd(i64 %x) sanitize_memory {
@@ -67,7 +67,7 @@ entry:
 ; CHECK-LABEL: @MulOdd(
 ; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
 ; CHECK: [[B:%.*]] = mul i64 [[A]], 1
-; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+; CHECK: store i64 [[B]], ptr @__msan_retval_tls
 
 
 define i64 @MulLarge(i64 %x) sanitize_memory {
@@ -81,7 +81,7 @@ entry:
 ; CHECK-LABEL: @MulLarge(
 ; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
 ; CHECK: [[B:%.*]] = mul i64 [[A]], -9223372036854775808
-; CHECK: store i64 [[B]], i64* {{.*}} @__msan_retval_tls
+; CHECK: store i64 [[B]], ptr @__msan_retval_tls
 
 define <4 x i32> @MulVectorConst(<4 x i32> %x) sanitize_memory {
 entry:
@@ -92,13 +92,13 @@ entry:
 ; CHECK-LABEL: @MulVectorConst(
 ; CHECK: [[A:%.*]] = load {{.*}} @__msan_param_tls
 ; CHECK: [[B:%.*]] = mul <4 x i32> [[A]], <i32 1024, i32 0, i32 16, i32 16>
-; CHECK: store <4 x i32> [[B]], <4 x i32>* {{.*}} @__msan_retval_tls
+; CHECK: store <4 x i32> [[B]], ptr @__msan_retval_tls
 
 
 ; The constant in multiplication does not have to be a literal integer constant.
-@X = linkonce_odr global i8* null
+@X = linkonce_odr global ptr null
 define i64 @MulNonIntegerConst(i64 %a) sanitize_memory {
-  %mul = mul i64 %a, ptrtoint (i8** @X to i64)
+  %mul = mul i64 %a, ptrtoint (ptr @X to i64)
   ret i64 %mul
 }
 
@@ -108,7 +108,7 @@ define i64 @MulNonIntegerConst(i64 %a) sanitize_memory {
 ; CHECK: store i64 [[B]], {{.*}}@__msan_retval_tls
 
 define <2 x i64> @MulNonIntegerVectorConst(<2 x i64> %a) sanitize_memory {
-  %mul = mul <2 x i64> %a, <i64 3072, i64 ptrtoint (i8** @X to i64)>
+  %mul = mul <2 x i64> %a, <i64 3072, i64 ptrtoint (ptr @X to i64)>
   ret <2 x i64> %mul
 }
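
; The rule these checks encode: for a multiply by a constant K, the shadow
; is multiplied by the largest power of two dividing K, since K's trailing
; zero bits shift uninitialized bits left while any odd factor preserves
; them. A hand-written example, with %xs standing in for the shadow of %x:
;
;   define i64 @sketch_mul48(i64 %x, i64 %xs) sanitize_memory {
;     ; 48 = 16 * 3, so the shadow scales by 16 and the odd factor 3 drops.
;     %rs = mul i64 %xs, 16
;     %r = mul i64 %x, 48
;     ret i64 %r
;   }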
 

diff --git a/llvm/test/Instrumentation/MemorySanitizer/no-check-rt-unaligned.ll b/llvm/test/Instrumentation/MemorySanitizer/no-check-rt-unaligned.ll
index 2d9280f9767fa..ce71097ce8ba4 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/no-check-rt-unaligned.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/no-check-rt-unaligned.ll
@@ -3,14 +3,14 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-declare noundef i8 @__sanitizer_unaligned_load8(i8* noundef)
-declare void @__sanitizer_unaligned_store8(i8* noundef, i8 noundef)
+declare noundef i8 @__sanitizer_unaligned_load8(ptr noundef)
+declare void @__sanitizer_unaligned_store8(ptr noundef, i8 noundef)
 
-define noundef i8 @unaligned_load(i8* noundef %ptr) sanitize_memory {
-; CHECK: @unaligned_load(i8* {{.*}}[[PTR:%.+]])
+define noundef i8 @unaligned_load(ptr noundef %ptr) sanitize_memory {
+; CHECK: @unaligned_load(ptr {{.*}}[[PTR:%.+]])
 ; CHECK: store i64 0, {{.*}} @__msan_param_tls
-; CHECK: [[VAL:%.*]] = call noundef i8 @__sanitizer_unaligned_load8(i8* noundef [[PTR]])
-  %val = call noundef i8 @__sanitizer_unaligned_load8(i8* noundef %ptr)
+; CHECK: [[VAL:%.*]] = call noundef i8 @__sanitizer_unaligned_load8(ptr noundef [[PTR]])
+  %val = call noundef i8 @__sanitizer_unaligned_load8(ptr noundef %ptr)
 ; CHECK: load {{.*}} @__msan_retval_tls
 ; CHECK-ORIGIN: load {{.*}} @__msan_retval_origin_tls
 ; CHECK: call void @__msan_warning_{{.*}}noreturn
@@ -18,12 +18,12 @@ define noundef i8 @unaligned_load(i8* noundef %ptr) sanitize_memory {
   ret i8 %val
 }
 
-define void @unaligned_store(i8* noundef %ptr, i8 noundef %val) sanitize_memory {
-; CHECK: @unaligned_store(i8* {{.*}}[[PTR:%.+]], i8 {{.*}}[[VAL:%.+]])
+define void @unaligned_store(ptr noundef %ptr, i8 noundef %val) sanitize_memory {
+; CHECK: @unaligned_store(ptr {{.*}}[[PTR:%.+]], i8 {{.*}}[[VAL:%.+]])
 ; CHECK: store i64 0, {{.*}} @__msan_param_tls
 ; CHECK: store i8 0, {{.*}} @__msan_param_tls
-; CHECK: call void @__sanitizer_unaligned_store8(i8* noundef [[PTR]], i8 noundef [[VAL]])
-  call void @__sanitizer_unaligned_store8(i8* noundef %ptr, i8 noundef %val)
+; CHECK: call void @__sanitizer_unaligned_store8(ptr noundef [[PTR]], i8 noundef [[VAL]])
+  call void @__sanitizer_unaligned_store8(ptr noundef %ptr, i8 noundef %val)
 ; CHECK: ret void
   ret void
 }
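
; One plausible reading of the zero parameter-shadow stores above, as a
; hand-written sketch: noundef arguments are handled eagerly, so the
; caller passes a clean shadow rather than forwarding the shadow of %ptr
; (illustrative, not pass output):
;
;   @__msan_param_tls = external thread_local global [100 x i64]
;
;   declare noundef i8 @__sanitizer_unaligned_load8(ptr noundef)
;
;   define i8 @sketch_unaligned(ptr noundef %p) sanitize_memory {
;     ; A noundef pointer argument gets a zero shadow at the call site.
;     store i64 0, ptr @__msan_param_tls
;     %v = call noundef i8 @__sanitizer_unaligned_load8(ptr noundef %p)
;     ret i8 %v
;   }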

diff --git a/llvm/test/Instrumentation/MemorySanitizer/nosanitize.ll b/llvm/test/Instrumentation/MemorySanitizer/nosanitize.ll
index df6e8be9951ff..5aaca33c2f0fe 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/nosanitize.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/nosanitize.ll
@@ -20,9 +20,9 @@ define void @foo() {
 @__sancov_gen_ = private global [1 x i8] zeroinitializer, section "__sancov_cntrs", align 1
 define void @sancov() sanitize_memory {
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), !nosanitize !{}
+  %0 = load i8, ptr @__sancov_gen_, !nosanitize !{}
   %1 = add i8 %0, 1
-  store i8 %1, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), !nosanitize !{}
+  store i8 %1, ptr @__sancov_gen_, !nosanitize !{}
   ret void
 }
 
@@ -35,10 +35,10 @@ entry:
 define void @load_store() sanitize_memory {
 entry:
   %x = alloca i32, align 4, !nosanitize !{}
-  store i32 4, i32* %x, align 4, !nosanitize !{}
-  %0 = load i32, i32* %x, align 4, !nosanitize !{}
+  store i32 4, ptr %x, align 4, !nosanitize !{}
+  %0 = load i32, ptr %x, align 4, !nosanitize !{}
   %add = add nsw i32 %0, %0
-  store i32 %add, i32* %x, align 4, !nosanitize !{}
+  store i32 %add, ptr %x, align 4, !nosanitize !{}
   ret void
 }
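
; The invariant under test, as a hand-written one-liner: an access
; carrying !nosanitize metadata is skipped entirely, so it gets no shadow
; update and no check:
;
;   define void @sketch_nosanitize(ptr %p) sanitize_memory {
;     store i32 1, ptr %p, !nosanitize !{}
;     ret void
;   }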
 

diff --git a/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll b/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll
index 07ee832fc5407..24276a28fdd70 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/opaque-ptr.ll
@@ -13,7 +13,7 @@ define void @test_memcpy(ptr %p, ptr byval(i32) %p2) sanitize_memory {
 ; CHECK-NEXT:    [[TMP4:%.*]] = call ptr @__msan_memcpy(ptr [[P:%.*]], ptr [[P2]], i64 4)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.p0.p0.i64(i8* %p, i8* %p2, i64 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %p, ptr %p2, i64 4, i1 false)
   ret void
 }
 
@@ -27,9 +27,9 @@ define void @test_memmove(ptr %p, ptr byval(i32) %p2) sanitize_memory {
 ; CHECK-NEXT:    [[TMP4:%.*]] = call ptr @__msan_memmove(ptr [[P:%.*]], ptr [[P2]], i64 4)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memmove.p0.p0.i64(i8* %p, i8* %p2, i64 4, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %p, ptr %p2, i64 4, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0.p0.i64(i8*, i8*, i64, i1)
-declare void @llvm.memmove.p0.p0.i64(i8*, i8*, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr, ptr, i64, i1)
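
; A hand-written sketch of the lowering checked above (helper signature
; taken from the CHECK lines): llvm.memcpy becomes a call to
; __msan_memcpy, which copies shadow and origins along with the payload,
; and with opaque pointers the operands need no casts:
;
;   declare ptr @__msan_memcpy(ptr, ptr, i64)
;
;   define void @sketch_memcpy(ptr %d, ptr %s) sanitize_memory {
;     %r = call ptr @__msan_memcpy(ptr %d, ptr %s, i64 4)
;     ret void
;   }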

diff --git a/llvm/test/Instrumentation/MemorySanitizer/origin-alignment.ll b/llvm/test/Instrumentation/MemorySanitizer/origin-alignment.ll
index 052bc46c83f18..7a3ef66956824 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/origin-alignment.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/origin-alignment.ll
@@ -20,7 +20,7 @@ target triple = "x86_64-unknown-linux-gnu"
 ; 8-aligned store => 8-aligned origin store, origin address is not realigned
 define void @Store8(i8 %x) sanitize_memory {
 entry:
-  store i8 %x, i8* @a8, align 8
+  store i8 %x, ptr @a8, align 8
   ret void
 }
 
@@ -35,7 +35,7 @@ entry:
 ; 4-aligned store => 4-aligned origin store, origin address is not realigned
 define void @Store4(i8 %x) sanitize_memory {
 entry:
-  store i8 %x, i8* @a4, align 4
+  store i8 %x, ptr @a4, align 4
   ret void
 }
 
@@ -50,7 +50,7 @@ entry:
 ; 2-aligned store => 4-aligned origin store, origin address is realigned
 define void @Store2(i8 %x) sanitize_memory {
 entry:
-  store i8 %x, i8* @a2, align 2
+  store i8 %x, ptr @a2, align 2
   ret void
 }
 
@@ -65,7 +65,7 @@ entry:
 ; 1-aligned store => 4-aligned origin store, origin address is realigned
 define void @Store1(i8 %x) sanitize_memory {
 entry:
-  store i8 %x, i8* @a1, align 1
+  store i8 %x, ptr @a1, align 1
   ret void
 }
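
; The realignment rule spelled out in the comments above, as a
; hand-written sketch: origins live in 4-byte cells, so for stores aligned
; below 4 the pass masks the low bits of the origin address before the
; 4-aligned origin write. The origin-base offset below is illustrative:
;
;   define void @sketch_origin(ptr %p, i32 %origin) sanitize_memory {
;     %a = ptrtoint ptr %p to i64
;     %o = add i64 %a, 17592186044416   ; illustrative origin offset
;     %o4 = and i64 %o, -4              ; realign to the 4-byte origin cell
;     %op = inttoptr i64 %o4 to ptr
;     store i32 %origin, ptr %op, align 4
;     ret void
;   }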
 

diff --git a/llvm/test/Instrumentation/MemorySanitizer/origin-array.ll b/llvm/test/Instrumentation/MemorySanitizer/origin-array.ll
index 831e3665c2d18..228d686ad7f97 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/origin-array.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/origin-array.ll
@@ -6,9 +6,9 @@ target triple = "aarch64-unknown-linux-gnu"
 
 ; Check origin handling of array types.
 
-define void @foo([2 x i64] %v, [2 x i64]* %p) sanitize_memory {
+define void @foo([2 x i64] %v, ptr %p) sanitize_memory {
 entry:
-  store [2 x i64] %v, [2 x i64]* %p, align 8
+  store [2 x i64] %v, ptr %p, align 8
   ret void
 }
 
@@ -18,7 +18,7 @@ entry:
 
 ; CHECK: [[TMP1:%[01-9a-z]+]] = ptrtoint
 ; CHECK: [[TMP2:%[01-9a-z]+]] = xor i64 [[TMP1]]
-; CHECK: [[TMP3:%[01-9a-z]+]] = inttoptr i64 [[TMP2]] to [2 x i64]*
-; CHECK: store [2 x i64] [[PARAM]], [2 x i64]* [[TMP3]]
+; CHECK: [[TMP3:%[01-9a-z]+]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK: store [2 x i64] [[PARAM]], ptr [[TMP3]]
 
 ; CHECK: {{.*}} call i32 @__msan_chain_origin(i32 {{.*}}[[ORIGIN]])
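
; A hand-written sketch of the chaining step checked above (callback
; signature from the CHECK line): before an aggregate store's origin is
; written out, it is passed through __msan_chain_origin so the store site
; is appended to the origin history:
;
;   declare i32 @__msan_chain_origin(i32)
;
;   define void @sketch_array(ptr %p, [2 x i64] %v, i32 %origin) sanitize_memory {
;     %chained = call i32 @__msan_chain_origin(i32 %origin)
;     store [2 x i64] %v, ptr %p, align 8
;     ret void
;   }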

diff --git a/llvm/test/Instrumentation/MemorySanitizer/reduce.ll b/llvm/test/Instrumentation/MemorySanitizer/reduce.ll
index 8ddac6f884bcc..0c688811611b8 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/reduce.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/reduce.ll
@@ -11,12 +11,12 @@ declare i32 @llvm.vector.reduce.or(<3 x i32>)
 
 ; CHECK-LABEL: @reduce_add
 define i32 @reduce_add() sanitize_memory {
-; CHECK: [[P:%.*]] = inttoptr i64 0 to <3 x i32>*
-  %p = inttoptr i64 0 to <3 x i32> *
-; CHECK: [[O:%.*]] = load <3 x i32>, <3 x i32>* [[P]]
-  %o = load <3 x i32>, <3 x i32> *%p
-; CHECK: [[O_SHADOW:%.*]] = load <3 x i32>, <3 x i32>*
-; CHECK: [[O_ORIGIN:%.*]] = load i32, i32*
+; CHECK: [[P:%.*]] = inttoptr i64 0 to ptr
+  %p = inttoptr i64 0 to ptr
+; CHECK: [[O:%.*]] = load <3 x i32>, ptr [[P]]
+  %o = load <3 x i32>, ptr %p
+; CHECK: [[O_SHADOW:%.*]] = load <3 x i32>, ptr
+; CHECK: [[O_ORIGIN:%.*]] = load i32, ptr
 ; CHECK: [[R_SHADOW:%.*]] = call i32 @llvm.vector.reduce.or.v3i32(<3 x i32> [[O_SHADOW]])
 ; CHECK: [[R:%.*]] = call i32 @llvm.vector.reduce.add.v3i32(<3 x i32> [[O]])
   %r = call i32 @llvm.vector.reduce.add(<3 x i32> %o)
@@ -28,12 +28,12 @@ define i32 @reduce_add() sanitize_memory {
 
 ; CHECK-LABEL: @reduce_and
 define i32 @reduce_and() sanitize_memory {
-; CHECK: [[P:%.*]] = inttoptr i64 0 to <3 x i32>*
-  %p = inttoptr i64 0 to <3 x i32> *
-; CHECK: [[O:%.*]] = load <3 x i32>, <3 x i32>* [[P]]
-  %o = load <3 x i32>, <3 x i32> *%p
-; CHECK: [[O_SHADOW:%.*]] = load <3 x i32>, <3 x i32>*
-; CHECK: [[O_ORIGIN:%.*]] = load i32, i32*
+; CHECK: [[P:%.*]] = inttoptr i64 0 to ptr
+  %p = inttoptr i64 0 to ptr
+; CHECK: [[O:%.*]] = load <3 x i32>, ptr [[P]]
+  %o = load <3 x i32>, ptr %p
+; CHECK: [[O_SHADOW:%.*]] = load <3 x i32>, ptr
+; CHECK: [[O_ORIGIN:%.*]] = load i32, ptr
 ; CHECK: [[O_SHADOW_1:%.*]] = or <3 x i32> [[O]], [[O_SHADOW]]
 ; CHECK: [[O_SHADOW_2:%.*]] = call i32 @llvm.vector.reduce.and.v3i32(<3 x i32> [[O_SHADOW_1]]
 ; CHECK: [[O_SHADOW_3:%.*]] = call i32 @llvm.vector.reduce.or.v3i32(<3 x i32> [[O_SHADOW]])
@@ -48,12 +48,12 @@ define i32 @reduce_and() sanitize_memory {
 
 ; CHECK-LABEL: @reduce_or
 define i32 @reduce_or() sanitize_memory {
-; CHECK: [[P:%.*]] = inttoptr i64 0 to <3 x i32>*
-  %p = inttoptr i64 0 to <3 x i32> *
-; CHECK: [[O:%.*]] = load <3 x i32>, <3 x i32>* [[P]]
-  %o = load <3 x i32>, <3 x i32> *%p
-; CHECK: [[O_SHADOW:%.*]] = load <3 x i32>, <3 x i32>*
-; CHECK: [[O_ORIGIN:%.*]] = load i32, i32*
+; CHECK: [[P:%.*]] = inttoptr i64 0 to ptr
+  %p = inttoptr i64 0 to ptr
+; CHECK: [[O:%.*]] = load <3 x i32>, ptr [[P]]
+  %o = load <3 x i32>, ptr %p
+; CHECK: [[O_SHADOW:%.*]] = load <3 x i32>, ptr
+; CHECK: [[O_ORIGIN:%.*]] = load i32, ptr
 ; CHECK: [[NOT_O:%.*]] = xor <3 x i32> [[O]], <i32 -1, i32 -1, i32 -1>
 ; CHECK: [[O_SHADOW_1:%.*]] = or <3 x i32> [[NOT_O]], [[O_SHADOW]]
 ; CHECK: [[O_SHADOW_2:%.*]] = call i32 @llvm.vector.reduce.and.v3i32(<3 x i32> [[O_SHADOW_1]]
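
; The propagation rule behind these checks, hand-written for the plain add
; case: a poisoned bit in any lane may poison the sum, so the result
; shadow is the or-reduction of the lane shadows (and/or reductions add
; the masking refinement shown above). %vs stands in for the shadow of %v:
;
;   declare i32 @llvm.vector.reduce.add.v3i32(<3 x i32>)
;   declare i32 @llvm.vector.reduce.or.v3i32(<3 x i32>)
;
;   define i32 @sketch_reduce(<3 x i32> %v, <3 x i32> %vs) sanitize_memory {
;     %rs = call i32 @llvm.vector.reduce.or.v3i32(<3 x i32> %vs)
;     %r = call i32 @llvm.vector.reduce.add.v3i32(<3 x i32> %v)
;     ret i32 %r
;   }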

diff --git a/llvm/test/Instrumentation/MemorySanitizer/sse-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/sse-intrinsics-x86.ll
index 00ae71a38064c..9d7763a6ef589 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/sse-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/sse-intrinsics-x86.ll
@@ -6,14 +6,14 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define <4 x float> @test_x86_sse_cmp_ps(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_cmp_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <4 x i32> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = sext <4 x i1> [[TMP4]] to <4 x i32>
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP5]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
@@ -24,8 +24,8 @@ declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind
 
 define <4 x float> @test_x86_sse_cmp_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_cmp_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
@@ -33,7 +33,7 @@ define <4 x float> @test_x86_sse_cmp_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i128
 ; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i128 [[TMP6]] to <4 x i32>
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP7]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
@@ -44,15 +44,15 @@ declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind
 
 define i32 @test_x86_sse_comieq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_comieq_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.comieq.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -63,15 +63,15 @@ declare i32 @llvm.x86.sse.comieq.ss(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_comige_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_comige_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.comige.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.comige.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -82,15 +82,15 @@ declare i32 @llvm.x86.sse.comige.ss(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_comigt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_comigt_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.comigt.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.comigt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -101,15 +101,15 @@ declare i32 @llvm.x86.sse.comigt.ss(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_comile_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_comile_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.comile.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.comile.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -120,15 +120,15 @@ declare i32 @llvm.x86.sse.comile.ss(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_comilt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_comilt_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.comilt.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.comilt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -139,15 +139,15 @@ declare i32 @llvm.x86.sse.comilt.ss(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_comineq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_comineq_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.comineq.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.comineq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -158,7 +158,7 @@ declare i32 @llvm.x86.sse.comineq.ss(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_cvtss2si(<4 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse_cvtss2si(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
@@ -168,7 +168,7 @@ define i32 @test_x86_sse_cvtss2si(<4 x float> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.cvtss2si(<4 x float> [[A0:%.*]])
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %a0) ; <i32> [#uses=1]
@@ -179,7 +179,7 @@ declare i32 @llvm.x86.sse.cvtss2si(<4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_cvttss2si(<4 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse_cvttss2si(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP2]], 0
@@ -189,7 +189,7 @@ define i32 @test_x86_sse_cvttss2si(<4 x float> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.cvttss2si(<4 x float> [[A0:%.*]])
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.cvttss2si(<4 x float> %a0) ; <i32> [#uses=1]
@@ -198,14 +198,14 @@ define i32 @test_x86_sse_cvttss2si(<4 x float> %a0) #0 {
 declare i32 @llvm.x86.sse.cvttss2si(<4 x float>) nounwind readnone
 
 
-define void @test_x86_sse_ldmxcsr(i8* %a0) #0 {
+define void @test_x86_sse_ldmxcsr(ptr %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse_ldmxcsr(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint i8* [[A0:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to i32*
-; CHECK-NEXT:    [[_LDMXCSR:%.*]] = load i32, i32* [[TMP4]], align 1
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT:    [[_LDMXCSR:%.*]] = load i32, ptr [[TMP4]], align 1
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i32 [[_LDMXCSR]], 0
 ; CHECK-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
@@ -214,24 +214,24 @@ define void @test_x86_sse_ldmxcsr(i8* %a0) #0 {
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR5]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
-; CHECK-NEXT:    call void @llvm.x86.sse.ldmxcsr(i8* [[A0]])
+; CHECK-NEXT:    call void @llvm.x86.sse.ldmxcsr(ptr [[A0]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.x86.sse.ldmxcsr(i8* %a0)
+  call void @llvm.x86.sse.ldmxcsr(ptr %a0)
   ret void
 }
-declare void @llvm.x86.sse.ldmxcsr(i8*) nounwind
+declare void @llvm.x86.sse.ldmxcsr(ptr) nounwind
 
 
 
 define <4 x float> @test_x86_sse_max_ps(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_max_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -242,13 +242,13 @@ declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind read
 
 define <4 x float> @test_x86_sse_max_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_max_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 4, i32 1, i32 2, i32 3>
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i32> [[TMP4]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -259,12 +259,12 @@ declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind read
 
 define <4 x float> @test_x86_sse_min_ps(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_min_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -275,13 +275,13 @@ declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind read
 
 define <4 x float> @test_x86_sse_min_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_min_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 4, i32 1, i32 2, i32 3>
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i32> [[TMP4]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
@@ -292,7 +292,7 @@ declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind read
 
 define i32 @test_x86_sse_movmsk_ps(<4 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse_movmsk_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -302,7 +302,7 @@ define i32 @test_x86_sse_movmsk_ps(<4 x float> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> [[A0:%.*]])
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0) ; <i32> [#uses=1]
@@ -314,10 +314,10 @@ declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone
 
 define <4 x float> @test_x86_sse_rcp_ps(<4 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse_rcp_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> [[TMP1]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -328,10 +328,10 @@ declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
 
 define <4 x float> @test_x86_sse_rcp_ss(<4 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse_rcp_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> [[TMP1]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -342,10 +342,10 @@ declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
 
 define <4 x float> @test_x86_sse_rsqrt_ps(<4 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse_rsqrt_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> [[TMP1]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -356,10 +356,10 @@ declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
 
 define <4 x float> @test_x86_sse_rsqrt_ss(<4 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse_rsqrt_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> [[TMP1]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
@@ -368,40 +368,40 @@ define <4 x float> @test_x86_sse_rsqrt_ss(<4 x float> %a0) #0 {
 declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
 
 
-define void @test_x86_sse_stmxcsr(i8* %a0) #0 {
+define void @test_x86_sse_stmxcsr(ptr %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse_stmxcsr(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
-; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint i8* [[A0:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A0:%.*]] to i64
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to i32*
-; CHECK-NEXT:    store i32 0, i32* [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT:    store i32 0, ptr [[TMP4]], align 4
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
 ; CHECK:       5:
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR5]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
-; CHECK-NEXT:    call void @llvm.x86.sse.stmxcsr(i8* [[A0]])
+; CHECK-NEXT:    call void @llvm.x86.sse.stmxcsr(ptr [[A0]])
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.x86.sse.stmxcsr(i8* %a0)
+  call void @llvm.x86.sse.stmxcsr(ptr %a0)
   ret void
 }
-declare void @llvm.x86.sse.stmxcsr(i8*) nounwind
+declare void @llvm.x86.sse.stmxcsr(ptr) nounwind
 
 
 define i32 @test_x86_sse_ucomieq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_ucomieq_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -412,15 +412,15 @@ declare i32 @llvm.x86.sse.ucomieq.ss(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_ucomige_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_ucomige_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.ucomige.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.ucomige.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -431,15 +431,15 @@ declare i32 @llvm.x86.sse.ucomige.ss(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_ucomigt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_ucomigt_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.ucomigt.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.ucomigt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -450,15 +450,15 @@ declare i32 @llvm.x86.sse.ucomigt.ss(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_ucomile_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_ucomile_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.ucomile.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.ucomile.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -469,15 +469,15 @@ declare i32 @llvm.x86.sse.ucomile.ss(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_ucomilt_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_ucomilt_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.ucomilt.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.ucomilt.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
@@ -488,15 +488,15 @@ declare i32 @llvm.x86.sse.ucomilt.ss(<4 x float>, <4 x float>) nounwind readnone
 
 define i32 @test_x86_sse_ucomineq_ss(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse_ucomineq_ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse.ucomineq.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse.ucomineq.ss(<4 x float> %a0, <4 x float> %a1) ; <i32> [#uses=1]
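
Shadow-wise, the scalar comparisons above all follow one pattern: only element 0 of each operand participates, so the instrumentation ORs the two operand shadows, extracts lane 0, and widens any set bit to a fully poisoned i32 result. A sketch under the same shadow conventions (function name is illustrative):

define i32 @ucomi_shadow(<4 x i32> %sa, <4 x i32> %sb) {
  %or = or <4 x i32> %sa, %sb                 ; combine the operand shadows
  %e0 = extractelement <4 x i32> %or, i64 0   ; only lane 0 feeds the comparison
  %ne = icmp ne i32 %e0, 0
  %s  = sext i1 %ne to i32                    ; any poison bit poisons the whole result
  ret i32 %s
}
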

diff --git a/llvm/test/Instrumentation/MemorySanitizer/sse2-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/sse2-intrinsics-x86.ll
index e57f293d02a4b..e9323f6dd3308 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/sse2-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/sse2-intrinsics-x86.ll
@@ -6,14 +6,14 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define <2 x double> @test_x86_sse2_cmp_pd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_cmp_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = sext <2 x i1> [[TMP4]] to <2 x i64>
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <2 x i64> [[TMP5]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
@@ -24,8 +24,8 @@ declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8) nounw
 
 define <2 x double> @test_x86_sse2_cmp_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_cmp_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
@@ -33,7 +33,7 @@ define <2 x double> @test_x86_sse2_cmp_sd(<2 x double> %a0, <2 x double> %a1) #0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i128
 ; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i128 [[TMP6]] to <2 x i64>
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <2 x i64> [[TMP7]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP7]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
@@ -44,15 +44,15 @@ declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounw
 
 define i32 @test_x86_sse2_comieq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_comieq_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -63,15 +63,15 @@ declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readno
 
 define i32 @test_x86_sse2_comige_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_comige_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.comige.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.comige.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -82,15 +82,15 @@ declare i32 @llvm.x86.sse2.comige.sd(<2 x double>, <2 x double>) nounwind readno
 
 define i32 @test_x86_sse2_comigt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_comigt_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.comigt.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.comigt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -101,15 +101,15 @@ declare i32 @llvm.x86.sse2.comigt.sd(<2 x double>, <2 x double>) nounwind readno
 
 define i32 @test_x86_sse2_comile_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_comile_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.comile.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.comile.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -120,15 +120,15 @@ declare i32 @llvm.x86.sse2.comile.sd(<2 x double>, <2 x double>) nounwind readno
 
 define i32 @test_x86_sse2_comilt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_comilt_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.comilt.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.comilt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -139,15 +139,15 @@ declare i32 @llvm.x86.sse2.comilt.sd(<2 x double>, <2 x double>) nounwind readno
 
 define i32 @test_x86_sse2_comineq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_comineq_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.comineq.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.comineq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -158,7 +158,7 @@ declare i32 @llvm.x86.sse2.comineq.sd(<2 x double>, <2 x double>) nounwind readn
 
 define <4 x i32> @test_x86_sse2_cvtpd2dq(<2 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvtpd2dq(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -168,7 +168,7 @@ define <4 x i32> @test_x86_sse2_cvtpd2dq(<2 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0) ; <<4 x i32>> [#uses=1]
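
The conversion intrinsics use the strict check-and-warn form instead: the whole operand shadow is bitcast to one wide integer, and a profile-weighted branch (the !prof metadata) diverts any nonzero shadow to the noreturn warning. Sketched standalone (function name is illustrative; the runtime callee is the one used throughout these tests):

declare void @__msan_warning_noreturn()

define void @check_fully_initialized(<2 x i64> %shadow) {
  %w  = bitcast <2 x i64> %shadow to i128   ; whole-operand shadow as one integer
  %ne = icmp ne i128 %w, 0
  br i1 %ne, label %warn, label %ok         ; carries !prof weights in the real output
warn:
  call void @__msan_warning_noreturn()
  unreachable
ok:
  ret void
}
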
@@ -179,7 +179,7 @@ declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone
 
 define <2 x i64> @test_mm_cvtpd_epi32_zext(<2 x double> %a0) nounwind #0 {
 ; CHECK-LABEL: @test_mm_cvtpd_epi32_zext(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -191,7 +191,7 @@ define <2 x i64> @test_mm_cvtpd_epi32_zext(<2 x double> %a0) nounwind #0 {
 ; CHECK-NEXT:    [[CVT:%.*]] = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> [[A0:%.*]])
 ; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[CVT]], <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
 ; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[BC]]
 ;
   %cvt = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
@@ -201,9 +201,9 @@ define <2 x i64> @test_mm_cvtpd_epi32_zext(<2 x double> %a0) nounwind #0 {
 }
 
 
-define <2 x i64> @test_mm_cvtpd_epi32_zext_load(<2 x double>* %p0) nounwind #0 {
+define <2 x i64> @test_mm_cvtpd_epi32_zext_load(ptr %p0) nounwind #0 {
 ; CHECK-LABEL: @test_mm_cvtpd_epi32_zext_load(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
@@ -211,11 +211,11 @@ define <2 x i64> @test_mm_cvtpd_epi32_zext_load(<2 x double>* %p0) nounwind #0 {
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       3:
-; CHECK-NEXT:    [[A0:%.*]] = load <2 x double>, <2 x double>* [[P0:%.*]], align 16
-; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint <2 x double>* [[P0]] to i64
+; CHECK-NEXT:    [[A0:%.*]] = load <2 x double>, ptr [[P0:%.*]], align 16
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P0]] to i64
 ; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
-; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to <2 x i64>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, <2 x i64>* [[TMP6]], align 16
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP6]], align 16
 ; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <2 x i64> [[_MSLD]] to i128
 ; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP7]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP1]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
@@ -226,10 +226,10 @@ define <2 x i64> @test_mm_cvtpd_epi32_zext_load(<2 x double>* %p0) nounwind #0 {
 ; CHECK-NEXT:    [[CVT:%.*]] = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> [[A0]])
 ; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[CVT]], <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
 ; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[BC]]
 ;
-  %a0 = load <2 x double>, <2 x double>* %p0
+  %a0 = load <2 x double>, ptr %p0
   %cvt = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
   %res = shufflevector <4 x i32> %cvt, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   %bc = bitcast <4 x i32> %res to <2 x i64>
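
The _load variants also exercise the application-to-shadow mapping: on x86-64 Linux the shadow address is the application address XORed with 87960930222080 (0x500000000000), and the shadow is loaded with the same alignment as the data. As a standalone sketch (function name is illustrative):

define <2 x i64> @shadow_of(ptr %p) {
  %app  = ptrtoint ptr %p to i64
  %shad = xor i64 %app, 87960930222080   ; 0x500000000000, the x86-64 Linux shadow mask
  %sptr = inttoptr i64 %shad to ptr
  %s    = load <2 x i64>, ptr %sptr, align 16
  ret <2 x i64> %s
}
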
@@ -239,7 +239,7 @@ define <2 x i64> @test_mm_cvtpd_epi32_zext_load(<2 x double>* %p0) nounwind #0 {
 
 define <4 x float> @test_x86_sse2_cvtpd2ps(<2 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvtpd2ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -249,7 +249,7 @@ define <4 x float> @test_x86_sse2_cvtpd2ps(<2 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0) ; <<4 x float>> [#uses=1]
@@ -259,7 +259,7 @@ declare <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double>) nounwind readnone
 
 define <4 x float> @test_x86_sse2_cvtpd2ps_zext(<2 x double> %a0) nounwind #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvtpd2ps_zext(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -270,7 +270,7 @@ define <4 x float> @test_x86_sse2_cvtpd2ps_zext(<2 x double> %a0) nounwind #0 {
 ; CHECK:       4:
 ; CHECK-NEXT:    [[CVT:%.*]] = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> [[A0:%.*]])
 ; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x float> [[CVT]], <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %cvt = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
@@ -278,9 +278,9 @@ define <4 x float> @test_x86_sse2_cvtpd2ps_zext(<2 x double> %a0) nounwind #0 {
   ret <4 x float> %res
 }
 
-define <4 x float> @test_x86_sse2_cvtpd2ps_zext_load(<2 x double>* %p0) nounwind #0 {
+define <4 x float> @test_x86_sse2_cvtpd2ps_zext_load(ptr %p0) nounwind #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvtpd2ps_zext_load(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
@@ -288,11 +288,11 @@ define <4 x float> @test_x86_sse2_cvtpd2ps_zext_load(<2 x double>* %p0) nounwind
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       3:
-; CHECK-NEXT:    [[A0:%.*]] = load <2 x double>, <2 x double>* [[P0:%.*]], align 16
-; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint <2 x double>* [[P0]] to i64
+; CHECK-NEXT:    [[A0:%.*]] = load <2 x double>, ptr [[P0:%.*]], align 16
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P0]] to i64
 ; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
-; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to <2 x i64>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, <2 x i64>* [[TMP6]], align 16
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP6]], align 16
 ; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <2 x i64> [[_MSLD]] to i128
 ; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP7]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP1]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
@@ -302,10 +302,10 @@ define <4 x float> @test_x86_sse2_cvtpd2ps_zext_load(<2 x double>* %p0) nounwind
 ; CHECK:       9:
 ; CHECK-NEXT:    [[CVT:%.*]] = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> [[A0]])
 ; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x float> [[CVT]], <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
-  %a0 = load <2 x double>, <2 x double>* %p0
+  %a0 = load <2 x double>, ptr %p0
   %cvt = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
   %res = shufflevector <4 x float> %cvt, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   ret <4 x float> %res
@@ -313,7 +313,7 @@ define <4 x float> @test_x86_sse2_cvtpd2ps_zext_load(<2 x double>* %p0) nounwind
 
 define <4 x i32> @test_x86_sse2_cvtps2dq(<4 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvtps2dq(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -323,7 +323,7 @@ define <4 x i32> @test_x86_sse2_cvtps2dq(<4 x float> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0) ; <<4 x i32>> [#uses=1]
@@ -334,7 +334,7 @@ declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone
 
 define i32 @test_x86_sse2_cvtsd2si(<2 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvtsd2si(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
@@ -344,7 +344,7 @@ define i32 @test_x86_sse2_cvtsd2si(<2 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> [[A0:%.*]])
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0) ; <i32> [#uses=1]
@@ -355,8 +355,8 @@ declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
 
 define <4 x float> @test_x86_sse2_cvtsd2ss(<4 x float> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvtsd2ss(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> [[TMP2]], i32 0, i32 0
@@ -367,7 +367,7 @@ define <4 x float> @test_x86_sse2_cvtsd2ss(<4 x float> %a0, <2 x double> %a1) #0
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i32> [[TMP4]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP4]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1) ; <<4 x float>> [#uses=1]
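
cvtsd2ss mixes both schemes: lane 0 of the second operand's shadow is checked eagerly, and since the instruction fully defines lane 0 of the result, the returned shadow is the first operand's shadow with lane 0 cleared. A sketch (function name is illustrative; the warning branch is elided):

define <4 x i32> @cvtsd2ss_shadow(<4 x i32> %sdst, <2 x i64> %ssrc) {
  %e0  = extractelement <2 x i64> %ssrc, i32 0         ; checked against zero in the real output
  %chk = icmp ne i64 %e0, 0
  %r   = insertelement <4 x i32> %sdst, i32 0, i32 0   ; lane 0 of the result is initialized
  ret <4 x i32> %r
}
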
@@ -376,10 +376,10 @@ define <4 x float> @test_x86_sse2_cvtsd2ss(<4 x float> %a0, <2 x double> %a1) #0
 declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind readnone
 
 
-define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, <2 x double>* %p1) #0 {
+define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, ptr %p1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
@@ -387,11 +387,11 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, <2 x double>* %
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[A1:%.*]] = load <2 x double>, <2 x double>* [[P1:%.*]], align 16
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <2 x double>* [[P1]] to i64
+; CHECK-NEXT:    [[A1:%.*]] = load <2 x double>, ptr [[P1:%.*]], align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P1]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <2 x i64>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, <2 x i64>* [[TMP7]], align 16
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP7]], align 16
 ; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x i64> [[_MSLD]], i32 0
 ; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <4 x i32> [[TMP2]], i32 0, i32 0
 ; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP8]], 0
@@ -401,19 +401,19 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load(<4 x float> %a0, <2 x double>* %
 ; CHECK-NEXT:    unreachable
 ; CHECK:       11:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> [[A0:%.*]], <2 x double> [[A1]])
-; CHECK-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
-  %a1 = load <2 x double>, <2 x double>* %p1
+  %a1 = load <2 x double>, ptr %p1
   %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
 
 
-define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, <2 x double>* %p1) optsize #0 {
+define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, ptr %p1) optsize #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvtsd2ss_load_optsize(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
@@ -421,11 +421,11 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, <2 x do
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[A1:%.*]] = load <2 x double>, <2 x double>* [[P1:%.*]], align 16
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <2 x double>* [[P1]] to i64
+; CHECK-NEXT:    [[A1:%.*]] = load <2 x double>, ptr [[P1:%.*]], align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P1]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <2 x i64>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, <2 x i64>* [[TMP7]], align 16
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP7]], align 16
 ; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x i64> [[_MSLD]], i32 0
 ; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <4 x i32> [[TMP2]], i32 0, i32 0
 ; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP8]], 0
@@ -435,10 +435,10 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, <2 x do
 ; CHECK-NEXT:    unreachable
 ; CHECK:       11:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> [[A0:%.*]], <2 x double> [[A1]])
-; CHECK-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
-  %a1 = load <2 x double>, <2 x double>* %p1
+  %a1 = load <2 x double>, ptr %p1
   %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
@@ -446,7 +446,7 @@ define <4 x float> @test_x86_sse2_cvtsd2ss_load_optsize(<4 x float> %a0, <2 x do
 
 define <4 x i32> @test_x86_sse2_cvttpd2dq(<2 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvttpd2dq(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -456,7 +456,7 @@ define <4 x i32> @test_x86_sse2_cvttpd2dq(<2 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0) ; <<4 x i32>> [#uses=1]
@@ -467,7 +467,7 @@ declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone
 
 define <2 x i64> @test_mm_cvttpd_epi32_zext(<2 x double> %a0) nounwind #0 {
 ; CHECK-LABEL: @test_mm_cvttpd_epi32_zext(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -479,7 +479,7 @@ define <2 x i64> @test_mm_cvttpd_epi32_zext(<2 x double> %a0) nounwind #0 {
 ; CHECK-NEXT:    [[CVT:%.*]] = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> [[A0:%.*]])
 ; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[CVT]], <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
 ; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[BC]]
 ;
   %cvt = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
@@ -489,9 +489,9 @@ define <2 x i64> @test_mm_cvttpd_epi32_zext(<2 x double> %a0) nounwind #0 {
 }
 
 
-define <2 x i64> @test_mm_cvttpd_epi32_zext_load(<2 x double>* %p0) nounwind #0 {
+define <2 x i64> @test_mm_cvttpd_epi32_zext_load(ptr %p0) nounwind #0 {
 ; CHECK-LABEL: @test_mm_cvttpd_epi32_zext_load(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
@@ -499,11 +499,11 @@ define <2 x i64> @test_mm_cvttpd_epi32_zext_load(<2 x double>* %p0) nounwind #0
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       3:
-; CHECK-NEXT:    [[A0:%.*]] = load <2 x double>, <2 x double>* [[P0:%.*]], align 16
-; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint <2 x double>* [[P0]] to i64
+; CHECK-NEXT:    [[A0:%.*]] = load <2 x double>, ptr [[P0:%.*]], align 16
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P0]] to i64
 ; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
-; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to <2 x i64>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, <2 x i64>* [[TMP6]], align 16
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP6]], align 16
 ; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <2 x i64> [[_MSLD]] to i128
 ; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i128 [[TMP7]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP1]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
@@ -514,10 +514,10 @@ define <2 x i64> @test_mm_cvttpd_epi32_zext_load(<2 x double>* %p0) nounwind #0
 ; CHECK-NEXT:    [[CVT:%.*]] = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> [[A0]])
 ; CHECK-NEXT:    [[RES:%.*]] = shufflevector <4 x i32> [[CVT]], <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
 ; CHECK-NEXT:    [[BC:%.*]] = bitcast <4 x i32> [[RES]] to <2 x i64>
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[BC]]
 ;
-  %a0 = load <2 x double>, <2 x double>* %p0
+  %a0 = load <2 x double>, ptr %p0
   %cvt = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
   %res = shufflevector <4 x i32> %cvt, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   %bc = bitcast <4 x i32> %res to <2 x i64>
@@ -527,7 +527,7 @@ define <2 x i64> @test_mm_cvttpd_epi32_zext_load(<2 x double>* %p0) nounwind #0
 
 define <4 x i32> @test_x86_sse2_cvttps2dq(<4 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvttps2dq(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -537,7 +537,7 @@ define <4 x i32> @test_x86_sse2_cvttps2dq(<4 x float> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> [[A0:%.*]])
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %a0) ; <<4 x i32>> [#uses=1]
@@ -548,7 +548,7 @@ declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone
 
 define i32 @test_x86_sse2_cvttsd2si(<2 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_cvttsd2si(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP2]], 0
@@ -558,7 +558,7 @@ define i32 @test_x86_sse2_cvttsd2si(<2 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> [[A0:%.*]])
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0) ; <i32> [#uses=1]
@@ -569,12 +569,12 @@ declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone
 
 define <2 x double> @test_x86_sse2_max_pd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_max_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store <2 x i64> [[_MSPROP]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -585,13 +585,13 @@ declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind
 
 define <2 x double> @test_x86_sse2_max_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_max_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <2 x i32> <i32 2, i32 1>
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store <2 x i64> [[TMP4]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP4]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -602,12 +602,12 @@ declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind
 
 define <2 x double> @test_x86_sse2_min_pd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_min_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store <2 x i64> [[_MSPROP]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -618,13 +618,13 @@ declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind
 
 define <2 x double> @test_x86_sse2_min_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_min_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP3]], <2 x i32> <i32 2, i32 1>
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store <2 x i64> [[TMP4]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP4]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
@@ -635,7 +635,7 @@ declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind
 
 define i32 @test_x86_sse2_movmsk_pd(<2 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_movmsk_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -645,7 +645,7 @@ define i32 @test_x86_sse2_movmsk_pd(<2 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> [[A0:%.*]])
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0) ; <i32> [#uses=1]
@@ -656,8 +656,8 @@ declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_packssdw_128(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -665,7 +665,7 @@ define <8 x i16> @test_x86_sse2_packssdw_128(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <4 x i1> [[TMP5]] to <4 x i32>
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> [[TMP4]], <4 x i32> [[TMP6]])
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> [[A0:%.*]], <4 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i16> [[_MSPROP_VECTOR_PACK]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
@@ -679,7 +679,7 @@ define <8 x i16> @test_x86_sse2_packssdw_128_fold() #0 {
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> zeroinitializer, <4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
-; CHECK-NEXT:    store <8 x i16> [[_MSPROP_VECTOR_PACK]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
@@ -689,8 +689,8 @@ define <8 x i16> @test_x86_sse2_packssdw_128_fold() #0 {
 
 define <16 x i8> @test_x86_sse2_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_packsswb_128(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16>
@@ -698,7 +698,7 @@ define <16 x i8> @test_x86_sse2_packsswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i16>
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> [[TMP4]], <8 x i16> [[TMP6]])
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i8> [[_MSPROP_VECTOR_PACK]], <16 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i8>*), align 8
+; CHECK-NEXT:    store <16 x i8> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i8> [[RES]]
 ;
   %res = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1) ; <<16 x i8>> [#uses=1]
@@ -712,7 +712,7 @@ define <16 x i8> @test_x86_sse2_packsswb_128_fold() #0 {
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> zeroinitializer, <8 x i16> zeroinitializer)
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> <i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678>, <8 x i16> zeroinitializer)
-; CHECK-NEXT:    store <16 x i8> [[_MSPROP_VECTOR_PACK]], <16 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i8>*), align 8
+; CHECK-NEXT:    store <16 x i8> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i8> [[RES]]
 ;
   %res = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <8 x i16> zeroinitializer)
@@ -722,8 +722,8 @@ define <16 x i8> @test_x86_sse2_packsswb_128_fold() #0 {
 
 define <16 x i8> @test_x86_sse2_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_packuswb_128(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <8 x i16> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <8 x i1> [[TMP3]] to <8 x i16>
@@ -731,7 +731,7 @@ define <16 x i8> @test_x86_sse2_packuswb_128(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <8 x i1> [[TMP5]] to <8 x i16>
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> [[TMP4]], <8 x i16> [[TMP6]])
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i8> [[_MSPROP_VECTOR_PACK]], <16 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i8>*), align 8
+; CHECK-NEXT:    store <16 x i8> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i8> [[RES]]
 ;
   %res = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1) ; <<16 x i8>> [#uses=1]
@@ -745,7 +745,7 @@ define <16 x i8> @test_x86_sse2_packuswb_128_fold() #0 {
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> zeroinitializer, <8 x i16> zeroinitializer)
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> <i16 0, i16 255, i16 256, i16 -1, i16 -1, i16 -255, i16 -256, i16 -32678>, <8 x i16> zeroinitializer)
-; CHECK-NEXT:    store <16 x i8> [[_MSPROP_VECTOR_PACK]], <16 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i8>*), align 8
+; CHECK-NEXT:    store <16 x i8> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i8> [[RES]]
 ;
   %res = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <8 x i16> zeroinitializer)
@@ -755,12 +755,12 @@ define <16 x i8> @test_x86_sse2_packuswb_128_fold() #0 {
 
 define <16 x i8> @test_x86_sse2_pavg_b(<16 x i8> %a0, <16 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_pavg_b(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i8>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <16 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> [[A0:%.*]], <16 x i8> [[A1:%.*]])
-; CHECK-NEXT:    store <16 x i8> [[_MSPROP]], <16 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i8>*), align 8
+; CHECK-NEXT:    store <16 x i8> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i8> [[RES]]
 ;
   %res = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
@@ -771,12 +771,12 @@ declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_pavg_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_pavg_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i16> [[_MSPROP]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -787,15 +787,15 @@ declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <4 x i32> @test_x86_sse2_pmadd_wd(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_pmadd_wd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i16> [[TMP3]] to <4 x i32>
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne <4 x i32> [[TMP4]], zeroinitializer
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <4 x i1> [[TMP5]] to <4 x i32>
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> %a1) ; <<4 x i32>> [#uses=1]
@@ -806,7 +806,7 @@ declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnon
 
 define i32 @test_x86_sse2_pmovmskb_128(<16 x i8> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_pmovmskb_128(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -816,7 +816,7 @@ define i32 @test_x86_sse2_pmovmskb_128(<16 x i8> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> [[A0:%.*]])
-; CHECK-NEXT:    store i32 0, i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %a0) ; <i32> [#uses=1]
@@ -827,12 +827,12 @@ declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_pmulh_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_pmulh_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i16> [[_MSPROP]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -843,12 +843,12 @@ declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_pmulhu_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_pmulhu_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <8 x i16> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i16> [[_MSPROP]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[_MSPROP]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -859,8 +859,8 @@ declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnon
 
 define <2 x i64> @test_x86_sse2_psad_bw(<16 x i8> %a0, <16 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psad_bw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i8>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <16 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x i64>
@@ -868,7 +868,7 @@ define <2 x i64> @test_x86_sse2_psad_bw(<16 x i8> %a0, <16 x i8> %a1) #0 {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <2 x i1> [[TMP5]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP7:%.*]] = lshr <2 x i64> [[TMP6]], <i64 48, i64 48>
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> [[A0:%.*]], <16 x i8> [[A1:%.*]])
-; CHECK-NEXT:    store <2 x i64> [[TMP7]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP7]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %a0, <16 x i8> %a1) ; <<2 x i64>> [#uses=1]
@@ -879,8 +879,8 @@ declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psll_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -890,7 +890,7 @@ define <4 x i32> @test_x86_sse2_psll_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> [[TMP1]], <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <4 x i32> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> [[A0:%.*]], <4 x i32> [[A1]])
-; CHECK-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -901,8 +901,8 @@ declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psll_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -912,7 +912,7 @@ define <2 x i64> @test_x86_sse2_psll_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> [[TMP1]], <2 x i64> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <2 x i64> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> [[A0:%.*]], <2 x i64> [[A1]])
-; CHECK-NEXT:    store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -923,8 +923,8 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_psll_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psll_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -934,7 +934,7 @@ define <8 x i16> @test_x86_sse2_psll_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> [[TMP1]], <8 x i16> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <8 x i16> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1]])
-; CHECK-NEXT:    store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -945,12 +945,12 @@ declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <4 x i32> @test_x86_sse2_pslli_d(<4 x i32> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_pslli_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <4 x i32> [[TMP3]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
@@ -961,12 +961,12 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone
 
 define <2 x i64> @test_x86_sse2_pslli_q(<2 x i64> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_pslli_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <2 x i64> [[TMP3]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
@@ -977,12 +977,12 @@ declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_pslli_w(<8 x i16> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_pslli_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i16> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <8 x i16> [[TMP3]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
@@ -993,8 +993,8 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone
 
 define <4 x i32> @test_x86_sse2_psra_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psra_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1004,7 +1004,7 @@ define <4 x i32> @test_x86_sse2_psra_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> [[TMP1]], <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <4 x i32> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> [[A0:%.*]], <4 x i32> [[A1]])
-; CHECK-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1015,8 +1015,8 @@ declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_psra_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psra_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1026,7 +1026,7 @@ define <8 x i16> @test_x86_sse2_psra_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> [[TMP1]], <8 x i16> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <8 x i16> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1]])
-; CHECK-NEXT:    store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1037,12 +1037,12 @@ declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <4 x i32> @test_x86_sse2_psrai_d(<4 x i32> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psrai_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <4 x i32> [[TMP3]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
@@ -1053,12 +1053,12 @@ declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_psrai_w(<8 x i16> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psrai_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i16> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <8 x i16> [[TMP3]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
@@ -1069,8 +1069,8 @@ declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone
 
 define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psrl_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1080,7 +1080,7 @@ define <4 x i32> @test_x86_sse2_psrl_d(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> [[TMP1]], <4 x i32> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <4 x i32> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> [[A0:%.*]], <4 x i32> [[A1]])
-; CHECK-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
@@ -1091,8 +1091,8 @@ declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psrl_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1102,7 +1102,7 @@ define <2 x i64> @test_x86_sse2_psrl_q(<2 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> [[TMP1]], <2 x i64> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <2 x i64> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> [[A0:%.*]], <2 x i64> [[A1]])
-; CHECK-NEXT:    store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
@@ -1113,8 +1113,8 @@ declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_psrl_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psrl_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to i128
 ; CHECK-NEXT:    [[TMP4:%.*]] = trunc i128 [[TMP3]] to i64
@@ -1124,7 +1124,7 @@ define <8 x i16> @test_x86_sse2_psrl_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> [[TMP1]], <8 x i16> [[A1:%.*]])
 ; CHECK-NEXT:    [[TMP9:%.*]] = or <8 x i16> [[TMP8]], [[TMP7]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1]])
-; CHECK-NEXT:    store <8 x i16> [[TMP9]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[TMP9]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
@@ -1133,10 +1133,10 @@ define <8 x i16> @test_x86_sse2_psrl_w(<8 x i16> %a0, <8 x i16> %a1) #0 {
 declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
 
 
-define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, <8 x i16>* %p) #0 {
+define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, ptr %p) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psrl_w_load(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
@@ -1144,11 +1144,11 @@ define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, <8 x i16>* %p) #0 {
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[A1:%.*]] = load <8 x i16>, <8 x i16>* [[P:%.*]], align 16
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <8 x i16>* [[P]] to i64
+; CHECK-NEXT:    [[A1:%.*]] = load <8 x i16>, ptr [[P:%.*]], align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <8 x i16>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <8 x i16>, <8 x i16>* [[TMP7]], align 16
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <8 x i16>, ptr [[TMP7]], align 16
 ; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x i16> [[_MSLD]] to i128
 ; CHECK-NEXT:    [[TMP9:%.*]] = trunc i128 [[TMP8]] to i64
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ne i64 [[TMP9]], 0
@@ -1157,10 +1157,10 @@ define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, <8 x i16>* %p) #0 {
 ; CHECK-NEXT:    [[TMP13:%.*]] = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> [[TMP2]], <8 x i16> [[A1]])
 ; CHECK-NEXT:    [[TMP14:%.*]] = or <8 x i16> [[TMP13]], [[TMP12]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> [[A0:%.*]], <8 x i16> [[A1]])
-; CHECK-NEXT:    store <8 x i16> [[TMP14]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[TMP14]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
-  %a1 = load <8 x i16>, <8 x i16>* %p
+  %a1 = load <8 x i16>, ptr %p
   %res = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
@@ -1168,12 +1168,12 @@ define <8 x i16> @test_x86_sse2_psrl_w_load(<8 x i16> %a0, <8 x i16>* %p) #0 {
 
 define <4 x i32> @test_x86_sse2_psrli_d(<4 x i32> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psrli_d(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <4 x i32> [[TMP3]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x i32> [[RES]]
 ;
   %res = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %a0, i32 7) ; <<4 x i32>> [#uses=1]
@@ -1184,12 +1184,12 @@ declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) nounwind readnone
 
 define <2 x i64> @test_x86_sse2_psrli_q(<2 x i64> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psrli_q(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <2 x i64> [[TMP3]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %res = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
@@ -1200,12 +1200,12 @@ declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) nounwind readnone
 
 define <8 x i16> @test_x86_sse2_psrli_w(<8 x i16> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse2_psrli_w(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> [[TMP1]], i32 7)
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <8 x i16> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <8 x i16> [[TMP3]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %a0, i32 7) ; <<8 x i16>> [#uses=1]
@@ -1216,15 +1216,15 @@ declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone
 
 define i32 @test_x86_sse2_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_ucomieq_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -1235,15 +1235,15 @@ declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readn
 
 define i32 @test_x86_sse2_ucomige_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_ucomige_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.ucomige.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.ucomige.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -1254,15 +1254,15 @@ declare i32 @llvm.x86.sse2.ucomige.sd(<2 x double>, <2 x double>) nounwind readn
 
 define i32 @test_x86_sse2_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_ucomigt_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.ucomigt.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.ucomigt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -1273,15 +1273,15 @@ declare i32 @llvm.x86.sse2.ucomigt.sd(<2 x double>, <2 x double>) nounwind readn
 
 define i32 @test_x86_sse2_ucomile_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_ucomile_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.ucomile.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.ucomile.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -1292,15 +1292,15 @@ declare i32 @llvm.x86.sse2.ucomile.sd(<2 x double>, <2 x double>) nounwind readn
 
 define i32 @test_x86_sse2_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_ucomilt_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.ucomilt.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.ucomilt.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -1311,15 +1311,15 @@ declare i32 @llvm.x86.sse2.ucomilt.sd(<2 x double>, <2 x double>) nounwind readn
 
 define i32 @test_x86_sse2_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse2_ucomineq_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP3]], i64 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext i1 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse2.ucomineq.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse2.ucomineq.sd(<2 x double> %a0, <2 x double> %a1) ; <i32> [#uses=1]
@@ -1360,9 +1360,9 @@ define void @mfence() nounwind #0 {
 }
 declare void @llvm.x86.sse2.mfence() nounwind
 
-define void @clflush(i8* %p) nounwind #0 {
+define void @clflush(ptr %p) nounwind #0 {
 ; CHECK-LABEL: @clflush(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
@@ -1370,12 +1370,12 @@ define void @clflush(i8* %p) nounwind #0 {
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR6]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       3:
-; CHECK-NEXT:    tail call void @llvm.x86.sse2.clflush(i8* [[P:%.*]])
+; CHECK-NEXT:    tail call void @llvm.x86.sse2.clflush(ptr [[P:%.*]])
 ; CHECK-NEXT:    ret void
 ;
-  tail call void @llvm.x86.sse2.clflush(i8* %p)
+  tail call void @llvm.x86.sse2.clflush(ptr %p)
   ret void
 }
-declare void @llvm.x86.sse2.clflush(i8*) nounwind
+declare void @llvm.x86.sse2.clflush(ptr) nounwind
 
 attributes #0 = { sanitize_memory }
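
The hunks above all follow one mechanical pattern: with typed pointers, every
access to @__msan_param_tls or @__msan_retval_tls needed a constant bitcast
(or a zero-index getelementptr inbounds) from [100 x i64]* to the shadow's
vector or scalar type; with opaque pointers the global is usable directly as
ptr, and only the ptrtoint/add/inttoptr arithmetic for non-zero TLS offsets
survives. A minimal sketch of the resulting IR, with a hypothetical function
name (illustrative only, not part of the patch):

@__msan_param_tls = external thread_local global [100 x i64]

define <8 x i16> @shadow_of_first_two_params() {
  ; First parameter's shadow lives at slot 0, so the global is loaded directly.
  %s0 = load <8 x i16>, ptr @__msan_param_tls, align 8
  ; Second parameter's shadow at byte offset 16: the integer offset
  ; computation is unchanged; only the pointee-typed casts are gone.
  %s1 = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
  %r = or <8 x i16> %s0, %s1
  ret <8 x i16> %r
}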

diff  --git a/llvm/test/Instrumentation/MemorySanitizer/sse41-intrinsics-x86.ll b/llvm/test/Instrumentation/MemorySanitizer/sse41-intrinsics-x86.ll
index 92fe09c4746b5..666c84e62da20 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/sse41-intrinsics-x86.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/sse41-intrinsics-x86.ll
@@ -6,14 +6,14 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define <2 x double> @test_x86_sse41_blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) #0 {
 ; CHECK-LABEL: @test_x86_sse41_blendvpd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <2 x i64> [[_MSPROP]], [[TMP3]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], <2 x double> [[A2:%.*]])
-; CHECK-NEXT:    store <2 x i64> [[_MSPROP1]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) ; <<2 x double>> [#uses=1]
@@ -24,14 +24,14 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d
 
 define <4 x float> @test_x86_sse41_blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) #0 {
 ; CHECK-LABEL: @test_x86_sse41_blendvps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], <4 x float> [[A2:%.*]])
-; CHECK-NEXT:    store <4 x i32> [[_MSPROP1]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) ; <<4 x float>> [#uses=1]
@@ -42,8 +42,8 @@ declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x floa
 
 define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_dppd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -56,7 +56,7 @@ define <2 x double> @test_x86_sse41_dppd(<2 x double> %a0, <2 x double> %a1) #0
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.sse41.dppd(<2 x double> %a0, <2 x double> %a1, i8 7) ; <<2 x double>> [#uses=1]
@@ -67,8 +67,8 @@ declare <2 x double> @llvm.x86.sse41.dppd(<2 x double>, <2 x double>, i8) nounwi
 
 define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_dpps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -81,7 +81,7 @@ define <4 x float> @test_x86_sse41_dpps(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.dpps(<4 x float> %a0, <4 x float> %a1, i8 7) ; <<4 x float>> [#uses=1]
@@ -92,8 +92,8 @@ declare <4 x float> @llvm.x86.sse41.dpps(<4 x float>, <4 x float>, i8) nounwind
 
 define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_insertps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -106,7 +106,7 @@ define <4 x float> @test_x86_sse41_insertps(<4 x float> %a0, <4 x float> %a1) #0
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> [[A0:%.*]], <4 x float> [[A1:%.*]], i8 17)
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 17) ; <<4 x float>> [#uses=1]
@@ -118,8 +118,8 @@ declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounw
 
 define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_mpsadbw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i8>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <16 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
@@ -132,7 +132,7 @@ define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       6:
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> [[A0:%.*]], <16 x i8> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <8 x i16> zeroinitializer, <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<8 x i16>> [#uses=1]
@@ -140,10 +140,10 @@ define <8 x i16> @test_x86_sse41_mpsadbw(<16 x i8> %a0, <16 x i8> %a1) #0 {
 }
 declare <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8>, <16 x i8>, i8) nounwind readnone
 
-define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(<16 x i8>* %ptr, <16 x i8> %a1) #0 {
+define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(ptr %ptr, <16 x i8> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_mpsadbw_load_op0(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <16 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP2:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
@@ -151,11 +151,11 @@ define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(<16 x i8>* %ptr, <16 x i8> %a1
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[A0:%.*]] = load <16 x i8>, <16 x i8>* [[PTR:%.*]], align 16
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <16 x i8>* [[PTR]] to i64
+; CHECK-NEXT:    [[A0:%.*]] = load <16 x i8>, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[PTR]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <16 x i8>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <16 x i8>, <16 x i8>* [[TMP7]], align 16
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 16
 ; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <16 x i8> [[_MSLD]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP8]], 0
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
@@ -167,18 +167,18 @@ define <8 x i16> @test_x86_sse41_mpsadbw_load_op0(<16 x i8>* %ptr, <16 x i8> %a1
 ; CHECK-NEXT:    unreachable
 ; CHECK:       11:
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> [[A0]], <16 x i8> [[A1:%.*]], i8 7)
-; CHECK-NEXT:    store <8 x i16> zeroinitializer, <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
-  %a0 = load <16 x i8>, <16 x i8>* %ptr
+  %a0 = load <16 x i8>, ptr %ptr
   %res = call <8 x i16> @llvm.x86.sse41.mpsadbw(<16 x i8> %a0, <16 x i8> %a1, i8 7) ; <<8 x i16>> [#uses=1]
   ret <8 x i16> %res
 }
 
 define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_packusdw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP1]], zeroinitializer
 ; CHECK-NEXT:    [[TMP4:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
@@ -186,7 +186,7 @@ define <8 x i16> @test_x86_sse41_packusdw(<4 x i32> %a0, <4 x i32> %a1) #0 {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sext <4 x i1> [[TMP5]] to <4 x i32>
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> [[TMP4]], <4 x i32> [[TMP6]])
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> [[A0:%.*]], <4 x i32> [[A1:%.*]])
-; CHECK-NEXT:    store <8 x i16> [[_MSPROP_VECTOR_PACK]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1) ; <<8 x i16>> [#uses=1]
@@ -200,7 +200,7 @@ define <8 x i16> @test_x86_sse41_packusdw_fold() #0 {
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP_VECTOR_PACK:%.*]] = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> zeroinitializer, <4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
-; CHECK-NEXT:    store <8 x i16> [[_MSPROP_VECTOR_PACK]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[_MSPROP_VECTOR_PACK]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> zeroinitializer, <4 x i32> <i32 65535, i32 65536, i32 -1, i32 -131072>)
@@ -210,14 +210,14 @@ define <8 x i16> @test_x86_sse41_packusdw_fold() #0 {
 
 define <16 x i8> @test_x86_sse41_pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) #0 {
 ; CHECK-LABEL: @test_x86_sse41_pblendvb(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, <16 x i8>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i8>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <16 x i8>*), align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <16 x i8>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i8>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSPROP:%.*]] = or <16 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[_MSPROP1:%.*]] = or <16 x i8> [[_MSPROP]], [[TMP3]]
 ; CHECK-NEXT:    [[RES:%.*]] = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> [[A0:%.*]], <16 x i8> [[A1:%.*]], <16 x i8> [[A2:%.*]])
-; CHECK-NEXT:    store <16 x i8> [[_MSPROP1]], <16 x i8>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i8>*), align 8
+; CHECK-NEXT:    store <16 x i8> [[_MSPROP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <16 x i8> [[RES]]
 ;
   %res = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) ; <<16 x i8>> [#uses=1]
@@ -228,10 +228,10 @@ declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>) noun
 
 define <8 x i16> @test_x86_sse41_phminposuw(<8 x i16> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse41_phminposuw(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> [[A0:%.*]])
-; CHECK-NEXT:    store <8 x i16> [[TMP1]], <8 x i16>* bitcast ([100 x i64]* @__msan_retval_tls to <8 x i16>*), align 8
+; CHECK-NEXT:    store <8 x i16> [[TMP1]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <8 x i16> [[RES]]
 ;
   %res = call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %a0) ; <<8 x i16>> [#uses=1]
@@ -242,15 +242,15 @@ declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>) nounwind readnone
 
 define i32 @test_x86_sse41_ptestc(<2 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_ptestc(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i1> [[TMP4]] to i2
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i2 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse41.ptestc(<2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
@@ -261,15 +261,15 @@ declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
 
 define i32 @test_x86_sse41_ptestnzc(<2 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_ptestnzc(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i1> [[TMP4]] to i2
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i2 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
@@ -280,15 +280,15 @@ declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
 
 define i32 @test_x86_sse41_ptestz(<2 x i64> %a0, <2 x i64> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_ptestz(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <2 x i1> [[TMP4]] to i2
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i2 [[TMP5]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = call i32 @llvm.x86.sse41.ptestz(<2 x i64> [[A0:%.*]], <2 x i64> [[A1:%.*]])
-; CHECK-NEXT:    store i32 [[TMP6]], i32* bitcast ([100 x i64]* @__msan_retval_tls to i32*), align 8
+; CHECK-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %res = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %a0, <2 x i64> %a1) ; <i32> [#uses=1]
@@ -299,7 +299,7 @@ declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
 
 define <2 x double> @test_x86_sse41_round_pd(<2 x double> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse41_round_pd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -309,7 +309,7 @@ define <2 x double> @test_x86_sse41_round_pd(<2 x double> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 7) ; <<2 x double>> [#uses=1]
@@ -320,7 +320,7 @@ declare <2 x double> @llvm.x86.sse41.round.pd(<2 x double>, i32) nounwind readno
 
 define <4 x float> @test_x86_sse41_round_ps(<4 x float> %a0) #0 {
 ; CHECK-LABEL: @test_x86_sse41_round_ps(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
@@ -330,7 +330,7 @@ define <4 x float> @test_x86_sse41_round_ps(<4 x float> %a0) #0 {
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> [[A0:%.*]], i32 7)
-; CHECK-NEXT:    store <4 x i32> zeroinitializer, <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
   %res = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 7) ; <<4 x float>> [#uses=1]
@@ -341,12 +341,12 @@ declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32) nounwind readnone
 
 define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_round_sd(
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP2]], <2 x i32> <i32 2, i32 1>
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1:%.*]], i32 7)
-; CHECK-NEXT:    store <2 x i64> [[TMP3]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP3]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
   %res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1, i32 7) ; <<2 x double>> [#uses=1]
@@ -355,10 +355,10 @@ define <2 x double> @test_x86_sse41_round_sd(<2 x double> %a0, <2 x double> %a1)
 declare <2 x double> @llvm.x86.sse41.round.sd(<2 x double>, <2 x double>, i32) nounwind readnone
 
 
-define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, <2 x double>* %a1) #0 {
+define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, ptr %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_round_sd_load(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
@@ -366,26 +366,26 @@ define <2 x double> @test_x86_sse41_round_sd_load(<2 x double> %a0, <2 x double>
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[A1B:%.*]] = load <2 x double>, <2 x double>* [[A1:%.*]], align 16
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <2 x double>* [[A1]] to i64
+; CHECK-NEXT:    [[A1B:%.*]] = load <2 x double>, ptr [[A1:%.*]], align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[A1]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <2 x i64>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, <2 x i64>* [[TMP7]], align 16
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i64>, ptr [[TMP7]], align 16
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> [[_MSLD]], <2 x i32> <i32 2, i32 1>
 ; CHECK-NEXT:    [[RES:%.*]] = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> [[A0:%.*]], <2 x double> [[A1B]], i32 7)
-; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <2 x i64>*), align 8
+; CHECK-NEXT:    store <2 x i64> [[TMP8]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <2 x double> [[RES]]
 ;
-  %a1b = load <2 x double>, <2 x double>* %a1
+  %a1b = load <2 x double>, ptr %a1
   %res = call <2 x double> @llvm.x86.sse41.round.sd(<2 x double> %a0, <2 x double> %a1b, i32 7) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
 
 
-define <4 x float> @test_x86_sse41_round_ss_load(<4 x float> %a0, <4 x float>* %a1) #0 {
+define <4 x float> @test_x86_sse41_round_ss_load(<4 x float> %a0, ptr %a1) #0 {
 ; CHECK-LABEL: @test_x86_sse41_round_ss_load(
-; CHECK-NEXT:    [[TMP1:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 16) to i64*), align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
 ; CHECK-NEXT:    call void @llvm.donothing()
 ; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP3:%.*]], label [[TMP4:%.*]], !prof [[PROF0]]
@@ -393,17 +393,17 @@ define <4 x float> @test_x86_sse41_round_ss_load(<4 x float> %a0, <4 x float>* %
 ; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
 ; CHECK-NEXT:    unreachable
 ; CHECK:       4:
-; CHECK-NEXT:    [[A1B:%.*]] = load <4 x float>, <4 x float>* [[A1:%.*]], align 16
-; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint <4 x float>* [[A1]] to i64
+; CHECK-NEXT:    [[A1B:%.*]] = load <4 x float>, ptr [[A1:%.*]], align 16
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[A1]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to <4 x i32>*
-; CHECK-NEXT:    [[_MSLD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP7]], align 16
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP7]], align 16
 ; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> [[_MSLD]], <4 x i32> <i32 4, i32 1, i32 2, i32 3>
 ; CHECK-NEXT:    [[RES:%.*]] = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> [[A0:%.*]], <4 x float> [[A1B]], i32 7)
-; CHECK-NEXT:    store <4 x i32> [[TMP8]], <4 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i32>*), align 8
+; CHECK-NEXT:    store <4 x i32> [[TMP8]], ptr @__msan_retval_tls, align 8
 ; CHECK-NEXT:    ret <4 x float> [[RES]]
 ;
-  %a1b = load <4 x float>, <4 x float>* %a1
+  %a1b = load <4 x float>, ptr %a1
   %res = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %a0, <4 x float> %a1b, i32 7) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
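
A side note on the pattern above: the xor with 87960930222080 (0x500000000000) is the x86_64 Linux shadow mapping these tests expect. As a minimal opaque-pointer sketch, assuming the usual origin base offset of 17592186044416 (0x100000000000), the address computation is roughly:

define void @shadow_mapping_sketch(ptr %p) {
  %addr = ptrtoint ptr %p to i64
  %shadow.i = xor i64 %addr, 87960930222080      ; shadow = addr ^ 0x500000000000
  %shadow = inttoptr i64 %shadow.i to ptr
  %origin.i = add i64 %shadow.i, 17592186044416  ; assumed origin base offset
  %origin.a = and i64 %origin.i, -4              ; origin slots are 4-byte aligned
  %origin = inttoptr i64 %origin.a to ptr
  ret void
}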

diff --git a/llvm/test/Instrumentation/MemorySanitizer/store-long-origin.ll b/llvm/test/Instrumentation/MemorySanitizer/store-long-origin.ll
index ffbd9ba38973a..43fec99d86d71 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/store-long-origin.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/store-long-origin.ll
@@ -7,9 +7,9 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; Test origin for longer stores.
 
-define void @Store8(i64* nocapture %p, i64 %x) sanitize_memory {
+define void @Store8(ptr nocapture %p, i64 %x) sanitize_memory {
 entry:
-  store i64 %x, i64* %p, align 8
+  store i64 %x, ptr %p, align 8
   ret void
 }
 
@@ -20,9 +20,9 @@ entry:
 ; CHECK: store i64 {{.*}}, align 8
 ; CHECK: ret void
 
-define void @Store8_align4(i64* nocapture %p, i64 %x) sanitize_memory {
+define void @Store8_align4(ptr nocapture %p, i64 %x) sanitize_memory {
 entry:
-  store i64 %x, i64* %p, align 4
+  store i64 %x, ptr %p, align 4
   ret void
 }
 
@@ -30,16 +30,16 @@ entry:
 ; CHECK-LABEL: define void @Store8_align4(
 ; CHECK: store i64 {{.*}}, align 4
 ; CHECK: store i32 {{.*}}, align 4
-; CHECK: getelementptr i32, i32* {{.*}}, i32 1
+; CHECK: getelementptr i32, ptr {{.*}}, i32 1
 ; CHECK: store i32 {{.*}}, align 4
 ; CHECK: store i64 {{.*}}, align 4
 ; CHECK: ret void
 
 %struct.S = type { i32, i32, i32 }
 
-define void @StoreAgg(%struct.S* nocapture %p, %struct.S %x) sanitize_memory {
+define void @StoreAgg(ptr nocapture %p, %struct.S %x) sanitize_memory {
 entry:
-  store %struct.S %x, %struct.S* %p, align 4
+  store %struct.S %x, ptr %p, align 4
   ret void
 }
 
@@ -47,17 +47,17 @@ entry:
 ; CHECK-LABEL: define void @StoreAgg(
 ; CHECK: store { i32, i32, i32 }  {{.*}}, align 4
 ; CHECK: store i32 {{.*}}, align 4
-; CHECK: getelementptr i32, i32* {{.*}}, i32 1
+; CHECK: getelementptr i32, ptr {{.*}}, i32 1
 ; CHECK: store i32 {{.*}}, align 4
-; CHECK: getelementptr i32, i32* {{.*}}, i32 2
+; CHECK: getelementptr i32, ptr {{.*}}, i32 2
 ; CHECK: store i32 {{.*}}, align 4
 ; CHECK: store %struct.S {{.*}}, align 4
 ; CHECK: ret void
 
 
-define void @StoreAgg8(%struct.S* nocapture %p, %struct.S %x) sanitize_memory {
+define void @StoreAgg8(ptr nocapture %p, %struct.S %x) sanitize_memory {
 entry:
-  store %struct.S %x, %struct.S* %p, align 8
+  store %struct.S %x, ptr %p, align 8
   ret void
 }
 
@@ -65,16 +65,16 @@ entry:
 ; CHECK-LABEL: define void @StoreAgg8(
 ; CHECK: store { i32, i32, i32 }  {{.*}}, align 8
 ; CHECK: store i64 {{.*}}, align 8
-; CHECK: getelementptr i32, i32* {{.*}}, i32 2
+; CHECK: getelementptr i32, ptr {{.*}}, i32 2
 ; CHECK: store i32 {{.*}}, align 8
 ; CHECK: store %struct.S {{.*}}, align 8
 ; CHECK: ret void
 
 
 %struct.Q = type { i64, i64, i64 }
-define void @StoreAgg24(%struct.Q* nocapture %p, %struct.Q %x) sanitize_memory {
+define void @StoreAgg24(ptr nocapture %p, %struct.Q %x) sanitize_memory {
 entry:
-  store %struct.Q %x, %struct.Q* %p, align 8
+  store %struct.Q %x, ptr %p, align 8
   ret void
 }
 
@@ -82,9 +82,9 @@ entry:
 ; CHECK-LABEL: define void @StoreAgg24(
 ; CHECK: store { i64, i64, i64 }  {{.*}}, align 8
 ; CHECK: store i64 {{.*}}, align 8
-; CHECK: getelementptr i64, i64* {{.*}}, i32 1
+; CHECK: getelementptr i64, ptr {{.*}}, i32 1
 ; CHECK: store i64 {{.*}}, align 8
-; CHECK: getelementptr i64, i64* {{.*}}, i32 2
+; CHECK: getelementptr i64, ptr {{.*}}, i32 2
 ; CHECK: store i64 {{.*}}, align 8
 ; CHECK: store %struct.Q {{.*}}, align 8
 ; CHECK: ret void
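
As a minimal sketch (names assumed, not from the test) of the splitting that the Store8_align4 and StoreAgg CHECK lines match: when the store is only 4-byte aligned, the 32-bit origin id is written once per 4-byte granule, stepping through the origin buffer with i32 GEPs:

define void @origin_split_sketch(ptr %originp, i32 %origin) {
  store i32 %origin, ptr %originp, align 4
  %slot1 = getelementptr i32, ptr %originp, i32 1
  store i32 %origin, ptr %slot1, align 4
  ret void
}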

diff --git a/llvm/test/Instrumentation/MemorySanitizer/store-origin.ll b/llvm/test/Instrumentation/MemorySanitizer/store-origin.ll
index 5e9b3e5ac7558..eca12989cf39f 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/store-origin.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/store-origin.ll
@@ -10,11 +10,11 @@ target triple = "x86_64-unknown-linux-gnu"
 ; Check that debug info for origin propagation code is set correctly.
 
 ; Function Attrs: nounwind
-define void @Store(i32* nocapture %p, i32 %x) #0 !dbg !4 {
+define void @Store(ptr nocapture %p, i32 %x) #0 !dbg !4 {
 entry:
-  tail call void @llvm.dbg.value(metadata i32* %p, i64 0, metadata !11, metadata !DIExpression()), !dbg !16
+  tail call void @llvm.dbg.value(metadata ptr %p, i64 0, metadata !11, metadata !DIExpression()), !dbg !16
   tail call void @llvm.dbg.value(metadata i32 %x, i64 0, metadata !12, metadata !DIExpression()), !dbg !16
-  store i32 %x, i32* %p, align 4, !dbg !17, !tbaa !18
+  store i32 %x, ptr %p, align 4, !dbg !17, !tbaa !18
   ret void, !dbg !22
 }
 
@@ -58,9 +58,9 @@ attributes #1 = { nounwind readnone }
 ; CHECK-MSAN: [[ORIGIN:%[0-9a-z]+]] = load {{.*}} @__msan_param_origin_tls
 
 ; CHECK-KMSAN: %param_shadow
-; CHECK-KMSAN: load i32, i32*
+; CHECK-KMSAN: load i32, ptr
 ; CHECK-KMSAN: %param_origin
-; CHECK-KMSAN: [[ORIGIN:%[0-9a-z]+]] = load i32, i32*
+; CHECK-KMSAN: [[ORIGIN:%[0-9a-z]+]] = load i32, ptr
 
 ; CHECK: store {{.*}}!dbg ![[DBG:[0-9]+]]
 ; CHECK: icmp
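
The CHECK-KMSAN lines above load parameter shadow and origin through plain ptr values instead of user-space TLS globals. A hedged sketch of where those pointers come from, assuming KMSAN's per-task hook @__msan_get_context_state (the real instrumentation indexes a context-state struct; the byte offset here is hypothetical):

declare ptr @__msan_get_context_state()

define i32 @kmsan_param_origin_sketch() {
  %state = call ptr @__msan_get_context_state()
  %param_origin = getelementptr i8, ptr %state, i64 800  ; hypothetical offset
  %origin = load i32, ptr %param_origin, align 4
  ret i32 %origin
}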

diff --git a/llvm/test/Instrumentation/MemorySanitizer/str-nobuiltin.ll b/llvm/test/Instrumentation/MemorySanitizer/str-nobuiltin.ll
index 7d6f164b2de21..ca31bc83128c9 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/str-nobuiltin.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/str-nobuiltin.ll
@@ -4,13 +4,13 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-unknown-linux-gnu"
 
-declare i8* @memchr(i8* %a, i32 %b, i64 %c)
-declare i32 @memcmp(i8* %a, i8* %b, i64 %c)
-declare i32 @strcmp(i8* %a, i8* %b)
-declare i8* @strcpy(i8* %a, i8* %b)
-declare i8* @stpcpy(i8* %a, i8* %b)
-declare i64 @strlen(i8* %a)
-declare i64 @strnlen(i8* %a, i64 %b)
+declare ptr @memchr(ptr %a, i32 %b, i64 %c)
+declare i32 @memcmp(ptr %a, ptr %b, i64 %c)
+declare i32 @strcmp(ptr %a, ptr %b)
+declare ptr @strcpy(ptr %a, ptr %b)
+declare ptr @stpcpy(ptr %a, ptr %b)
+declare i64 @strlen(ptr %a)
+declare i64 @strnlen(ptr %a, i64 %b)
 
 ; CHECK: call{{.*}}@memchr{{.*}} #[[ATTR:[0-9]+]]
 ; CHECK: call{{.*}}@memcmp{{.*}} #[[ATTR]]
@@ -21,13 +21,13 @@ declare i64 @strnlen(i8* %a, i64 %b)
 ; CHECK: call{{.*}}@strnlen{{.*}} #[[ATTR]]
 ; attributes #[[ATTR]] = { nobuiltin }
 
-define void @f1(i8* %a, i8* %b) nounwind uwtable sanitize_memory {
-  tail call i8* @memchr(i8* %a, i32 1, i64 12)
-  tail call i32 @memcmp(i8* %a, i8* %b, i64 12)
-  tail call i32 @strcmp(i8* %a, i8* %b)
-  tail call i8* @strcpy(i8* %a, i8* %b)
-  tail call i8* @stpcpy(i8* %a, i8* %b)
-  tail call i64 @strlen(i8* %a)
-  tail call i64 @strnlen(i8* %a, i64 12)
+define void @f1(ptr %a, ptr %b) nounwind uwtable sanitize_memory {
+  tail call ptr @memchr(ptr %a, i32 1, i64 12)
+  tail call i32 @memcmp(ptr %a, ptr %b, i64 12)
+  tail call i32 @strcmp(ptr %a, ptr %b)
+  tail call ptr @strcpy(ptr %a, ptr %b)
+  tail call ptr @stpcpy(ptr %a, ptr %b)
+  tail call i64 @strlen(ptr %a)
+  tail call i64 @strnlen(ptr %a, i64 12)
   ret void
 }

diff --git a/llvm/test/Instrumentation/MemorySanitizer/unreachable.ll b/llvm/test/Instrumentation/MemorySanitizer/unreachable.ll
index f7b351c570edc..121c8bcf29836 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/unreachable.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/unreachable.ll
@@ -5,12 +5,12 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; Test that MemorySanitizer correctly handles unreachable blocks.
 
-define i32 @Func(i32* %p) nounwind uwtable {
+define i32 @Func(ptr %p) nounwind uwtable {
 entry:
   br label %exit
 
 unreachable:
-  %x = load i32, i32* %p
+  %x = load i32, ptr %p
   br label %exit
 
 exit:

diff --git a/llvm/test/Instrumentation/MemorySanitizer/unsized_type.ll b/llvm/test/Instrumentation/MemorySanitizer/unsized_type.ll
index d41441c36ac01..bd2d07cf559c3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/unsized_type.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/unsized_type.ll
@@ -5,12 +5,12 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
+declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
 declare i1 @llvm.coro.alloc(token)
 
 define void @foo() sanitize_memory {
 entry:
-  %id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
+  %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
   %dyn.alloc.reqd = call i1  @llvm.coro.alloc(token %id)
   ret void
 }

diff --git a/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll b/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
index bec6fe40bd935..0b81e5682062a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/with-call-type-size.ll
@@ -90,9 +90,9 @@ define <4 x i32> @testUndef(<4 x i32> %vec, i32 %x) sanitize_memory {
 ; CHECK:         call void @__msan_warning_noreturn
 ; CHECK:         ret <4 x i32>
 
-declare <256 x i16> @llvm.masked.load.v256i16.p0v256i16(<256 x i16>*, i32, <256 x i1>, <256 x i16>)
-define <256 x i16> @testCombine(<256 x i16>* %vec, <256 x i1> %mask) sanitize_memory {
-  %vec1 = call <256 x i16> @llvm.masked.load.v256i16.p0v256i16(<256 x i16>* %vec, i32 16, <256 x i1> %mask, <256 x i16> zeroinitializer)
+declare <256 x i16> @llvm.masked.load.v256i16.p0(ptr, i32, <256 x i1>, <256 x i16>)
+define <256 x i16> @testCombine(ptr %vec, <256 x i1> %mask) sanitize_memory {
+  %vec1 = call <256 x i16> @llvm.masked.load.v256i16.p0(ptr %vec, i32 16, <256 x i1> %mask, <256 x i16> zeroinitializer)
   ret <256 x i16> %vec1
 }
 ; CHECK-LABEL: @testCombine(
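
Note the intrinsic rename in this hunk: with opaque pointers, pointer overloads are mangled as plain p0, so @llvm.masked.load.v256i16.p0v256i16 becomes @llvm.masked.load.v256i16.p0. The same rule at a smaller width, as a self-contained sketch:

declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32, <4 x i1>, <4 x i32>)

define <4 x i32> @masked_load_sketch(ptr %p, <4 x i1> %mask) {
  %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 16, <4 x i1> %mask, <4 x i32> zeroinitializer)
  ret <4 x i32> %v
}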


        

