[llvm] 1ec71a9 - ThreadSanitizer: Convert tests to opaque pointers

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 27 18:08:05 PST 2022


Author: Matt Arsenault
Date: 2022-11-27T20:55:15-05:00
New Revision: 1ec71a9569dca0597f040e21a607bbb0ea332cdf

URL: https://github.com/llvm/llvm-project/commit/1ec71a9569dca0597f040e21a607bbb0ea332cdf
DIFF: https://github.com/llvm/llvm-project/commit/1ec71a9569dca0597f040e21a607bbb0ea332cdf.diff

LOG: ThreadSanitizer: Convert tests to opaque pointers

Required manual fixes in atomic.ll, missing_dbg.ll

Added: 
    

Modified: 
    llvm/test/Instrumentation/ThreadSanitizer/atomic-non-integer.ll
    llvm/test/Instrumentation/ThreadSanitizer/atomic.ll
    llvm/test/Instrumentation/ThreadSanitizer/capture.ll
    llvm/test/Instrumentation/ThreadSanitizer/debug_calls.ll
    llvm/test/Instrumentation/ThreadSanitizer/do-not-instrument-memory-access.ll
    llvm/test/Instrumentation/ThreadSanitizer/eh.ll
    llvm/test/Instrumentation/ThreadSanitizer/missing_dbg.ll
    llvm/test/Instrumentation/ThreadSanitizer/no_sanitize_thread.ll
    llvm/test/Instrumentation/ThreadSanitizer/read_before_write.ll
    llvm/test/Instrumentation/ThreadSanitizer/read_from_global.ll
    llvm/test/Instrumentation/ThreadSanitizer/sanitize-thread-no-checking.ll
    llvm/test/Instrumentation/ThreadSanitizer/str-nobuiltin.ll
    llvm/test/Instrumentation/ThreadSanitizer/tsan-vs-gvn.ll
    llvm/test/Instrumentation/ThreadSanitizer/tsan_address_space_attr.ll
    llvm/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
    llvm/test/Instrumentation/ThreadSanitizer/tsan_musttail.ll
    llvm/test/Instrumentation/ThreadSanitizer/unaligned.ll
    llvm/test/Instrumentation/ThreadSanitizer/volatile.ll
    llvm/test/Instrumentation/ThreadSanitizer/vptr_read.ll
    llvm/test/Instrumentation/ThreadSanitizer/vptr_update.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Instrumentation/ThreadSanitizer/atomic-non-integer.ll b/llvm/test/Instrumentation/ThreadSanitizer/atomic-non-integer.ll
index a39f8553580d2..40c4bef3bff9b 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/atomic-non-integer.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/atomic-non-integer.ll
@@ -2,50 +2,50 @@
 ; Check that atomic memory operations on floating-point types are converted to calls into ThreadSanitizer runtime.
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
-define float @load_float(float* %fptr) {
-  %v = load atomic float, float* %fptr unordered, align 4
+define float @load_float(ptr %fptr) {
+  %v = load atomic float, ptr %fptr unordered, align 4
   ret float %v
   ; CHECK-LABEL: load_float
-  ; CHECK: call i32 @__tsan_atomic32_load(i32* %{{.+}}, i32 0)
+  ; CHECK: call i32 @__tsan_atomic32_load(ptr %{{.+}}, i32 0)
   ; CHECK: bitcast i32 {{.+}} to float
 }
 
-define double @load_double(double* %fptr) {
-  %v = load atomic double, double* %fptr unordered, align 8
+define double @load_double(ptr %fptr) {
+  %v = load atomic double, ptr %fptr unordered, align 8
   ret double %v
   ; CHECK-LABEL: load_double
-  ; CHECK: call i64 @__tsan_atomic64_load(i64* %{{.+}}, i32 0)
+  ; CHECK: call i64 @__tsan_atomic64_load(ptr %{{.+}}, i32 0)
   ; CHECK: bitcast i64 {{.+}} to double
 }
 
-define fp128 @load_fp128(fp128* %fptr) {
-  %v = load atomic fp128, fp128* %fptr unordered, align 16
+define fp128 @load_fp128(ptr %fptr) {
+  %v = load atomic fp128, ptr %fptr unordered, align 16
   ret fp128 %v
   ; CHECK-LABEL: load_fp128
-  ; CHECK: call i128 @__tsan_atomic128_load(i128* %{{.+}}, i32 0)
+  ; CHECK: call i128 @__tsan_atomic128_load(ptr %{{.+}}, i32 0)
   ; CHECK: bitcast i128 {{.+}} to fp128
 }
 
-define void @store_float(float* %fptr, float %v) {
-  store atomic float %v, float* %fptr unordered, align 4
+define void @store_float(ptr %fptr, float %v) {
+  store atomic float %v, ptr %fptr unordered, align 4
   ret void
   ; CHECK-LABEL: store_float
   ; CHECK: bitcast float %v to i32
-  ; CHECK: call void @__tsan_atomic32_store(i32* %{{.+}}, i32 %{{.+}}, i32 0)
+  ; CHECK: call void @__tsan_atomic32_store(ptr %{{.+}}, i32 %{{.+}}, i32 0)
 }
 
-define void @store_double(double* %fptr, double %v) {
-  store atomic double %v, double* %fptr unordered, align 8
+define void @store_double(ptr %fptr, double %v) {
+  store atomic double %v, ptr %fptr unordered, align 8
   ret void
   ; CHECK-LABEL: store_double
   ; CHECK: bitcast double %v to i64
-  ; CHECK: call void @__tsan_atomic64_store(i64* %{{.+}}, i64 %{{.+}}, i32 0)
+  ; CHECK: call void @__tsan_atomic64_store(ptr %{{.+}}, i64 %{{.+}}, i32 0)
 }
 
-define void @store_fp128(fp128* %fptr, fp128 %v) {
-  store atomic fp128 %v, fp128* %fptr unordered, align 16
+define void @store_fp128(ptr %fptr, fp128 %v) {
+  store atomic fp128 %v, ptr %fptr unordered, align 16
   ret void
   ; CHECK-LABEL: store_fp128
   ; CHECK: bitcast fp128 %v to i128
-  ; CHECK: call void @__tsan_atomic128_store(i128* %{{.+}}, i128 %{{.+}}, i32 0)
+  ; CHECK: call void @__tsan_atomic128_store(ptr %{{.+}}, i128 %{{.+}}, i32 0)
 }

diff --git a/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll b/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll
index 5705668f8d55b..dc86d1e092ef0 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/atomic.ll
@@ -2,2040 +2,2035 @@
 ; Check that atomic memory operations are converted to calls into ThreadSanitizer runtime.
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
-define i8 @atomic8_load_unordered(i8* %a) nounwind uwtable {
+define i8 @atomic8_load_unordered(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i8, i8* %a unordered, align 1, !dbg !7
+  %0 = load atomic i8, ptr %a unordered, align 1, !dbg !7
   ret i8 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic8_load_unordered
-; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_load(ptr %a, i32 0), !dbg
 
-define i8 @atomic8_load_monotonic(i8* %a) nounwind uwtable {
+define i8 @atomic8_load_monotonic(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i8, i8* %a monotonic, align 1, !dbg !7
+  %0 = load atomic i8, ptr %a monotonic, align 1, !dbg !7
   ret i8 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic8_load_monotonic
-; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_load(ptr %a, i32 0), !dbg
 
-define i8 @atomic8_load_acquire(i8* %a) nounwind uwtable {
+define i8 @atomic8_load_acquire(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i8, i8* %a acquire, align 1, !dbg !7
+  %0 = load atomic i8, ptr %a acquire, align 1, !dbg !7
   ret i8 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic8_load_acquire
-; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_load(ptr %a, i32 2), !dbg
 
-define i8 @atomic8_load_seq_cst(i8* %a) nounwind uwtable {
+define i8 @atomic8_load_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i8, i8* %a seq_cst, align 1, !dbg !7
+  %0 = load atomic i8, ptr %a seq_cst, align 1, !dbg !7
   ret i8 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic8_load_seq_cst
-; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_load(ptr %a, i32 5), !dbg
 
-define void @atomic8_store_unordered(i8* %a) nounwind uwtable {
+define void @atomic8_store_unordered(ptr %a) nounwind uwtable {
 entry:
-  store atomic i8 0, i8* %a unordered, align 1, !dbg !7
+  store atomic i8 0, ptr %a unordered, align 1, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_store_unordered
-; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 0), !dbg
+; CHECK: call void @__tsan_atomic8_store(ptr %a, i8 0, i32 0), !dbg
 
-define void @atomic8_store_monotonic(i8* %a) nounwind uwtable {
+define void @atomic8_store_monotonic(ptr %a) nounwind uwtable {
 entry:
-  store atomic i8 0, i8* %a monotonic, align 1, !dbg !7
+  store atomic i8 0, ptr %a monotonic, align 1, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_store_monotonic
-; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 0), !dbg
+; CHECK: call void @__tsan_atomic8_store(ptr %a, i8 0, i32 0), !dbg
 
-define void @atomic8_store_release(i8* %a) nounwind uwtable {
+define void @atomic8_store_release(ptr %a) nounwind uwtable {
 entry:
-  store atomic i8 0, i8* %a release, align 1, !dbg !7
+  store atomic i8 0, ptr %a release, align 1, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_store_release
-; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 3), !dbg
+; CHECK: call void @__tsan_atomic8_store(ptr %a, i8 0, i32 3), !dbg
 
-define void @atomic8_store_seq_cst(i8* %a) nounwind uwtable {
+define void @atomic8_store_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  store atomic i8 0, i8* %a seq_cst, align 1, !dbg !7
+  store atomic i8 0, ptr %a seq_cst, align 1, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_store_seq_cst
-; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 5), !dbg
+; CHECK: call void @__tsan_atomic8_store(ptr %a, i8 0, i32 5), !dbg
 
-define void @atomic8_xchg_monotonic(i8* %a) nounwind uwtable {
+define void @atomic8_xchg_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i8* %a, i8 0 monotonic, !dbg !7
+  atomicrmw xchg ptr %a, i8 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_xchg_monotonic
-; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_exchange(ptr %a, i8 0, i32 0), !dbg
 
-define void @atomic8_add_monotonic(i8* %a) nounwind uwtable {
+define void @atomic8_add_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i8* %a, i8 0 monotonic, !dbg !7
+  atomicrmw add ptr %a, i8 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_add_monotonic
-; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_add(ptr %a, i8 0, i32 0), !dbg
 
-define void @atomic8_sub_monotonic(i8* %a) nounwind uwtable {
+define void @atomic8_sub_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i8* %a, i8 0 monotonic, !dbg !7
+  atomicrmw sub ptr %a, i8 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_sub_monotonic
-; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_sub(ptr %a, i8 0, i32 0), !dbg
 
-define void @atomic8_and_monotonic(i8* %a) nounwind uwtable {
+define void @atomic8_and_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i8* %a, i8 0 monotonic, !dbg !7
+  atomicrmw and ptr %a, i8 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_and_monotonic
-; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_and(ptr %a, i8 0, i32 0), !dbg
 
-define void @atomic8_or_monotonic(i8* %a) nounwind uwtable {
+define void @atomic8_or_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i8* %a, i8 0 monotonic, !dbg !7
+  atomicrmw or ptr %a, i8 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_or_monotonic
-; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_or(ptr %a, i8 0, i32 0), !dbg
 
-define void @atomic8_xor_monotonic(i8* %a) nounwind uwtable {
+define void @atomic8_xor_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i8* %a, i8 0 monotonic, !dbg !7
+  atomicrmw xor ptr %a, i8 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_xor_monotonic
-; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_xor(ptr %a, i8 0, i32 0), !dbg
 
-define void @atomic8_nand_monotonic(i8* %a) nounwind uwtable {
+define void @atomic8_nand_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i8* %a, i8 0 monotonic, !dbg !7
+  atomicrmw nand ptr %a, i8 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_nand_monotonic
-; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(ptr %a, i8 0, i32 0), !dbg
 
-define void @atomic8_xchg_acquire(i8* %a) nounwind uwtable {
+define void @atomic8_xchg_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i8* %a, i8 0 acquire, !dbg !7
+  atomicrmw xchg ptr %a, i8 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_xchg_acquire
-; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_exchange(ptr %a, i8 0, i32 2), !dbg
 
-define void @atomic8_add_acquire(i8* %a) nounwind uwtable {
+define void @atomic8_add_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i8* %a, i8 0 acquire, !dbg !7
+  atomicrmw add ptr %a, i8 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_add_acquire
-; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_add(ptr %a, i8 0, i32 2), !dbg
 
-define void @atomic8_sub_acquire(i8* %a) nounwind uwtable {
+define void @atomic8_sub_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i8* %a, i8 0 acquire, !dbg !7
+  atomicrmw sub ptr %a, i8 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_sub_acquire
-; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_sub(ptr %a, i8 0, i32 2), !dbg
 
-define void @atomic8_and_acquire(i8* %a) nounwind uwtable {
+define void @atomic8_and_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i8* %a, i8 0 acquire, !dbg !7
+  atomicrmw and ptr %a, i8 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_and_acquire
-; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_and(ptr %a, i8 0, i32 2), !dbg
 
-define void @atomic8_or_acquire(i8* %a) nounwind uwtable {
+define void @atomic8_or_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i8* %a, i8 0 acquire, !dbg !7
+  atomicrmw or ptr %a, i8 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_or_acquire
-; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_or(ptr %a, i8 0, i32 2), !dbg
 
-define void @atomic8_xor_acquire(i8* %a) nounwind uwtable {
+define void @atomic8_xor_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i8* %a, i8 0 acquire, !dbg !7
+  atomicrmw xor ptr %a, i8 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_xor_acquire
-; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_xor(ptr %a, i8 0, i32 2), !dbg
 
-define void @atomic8_nand_acquire(i8* %a) nounwind uwtable {
+define void @atomic8_nand_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i8* %a, i8 0 acquire, !dbg !7
+  atomicrmw nand ptr %a, i8 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_nand_acquire
-; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(ptr %a, i8 0, i32 2), !dbg
 
-define void @atomic8_xchg_release(i8* %a) nounwind uwtable {
+define void @atomic8_xchg_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i8* %a, i8 0 release, !dbg !7
+  atomicrmw xchg ptr %a, i8 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_xchg_release
-; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 3), !dbg
+; CHECK: call i8 @__tsan_atomic8_exchange(ptr %a, i8 0, i32 3), !dbg
 
-define void @atomic8_add_release(i8* %a) nounwind uwtable {
+define void @atomic8_add_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i8* %a, i8 0 release, !dbg !7
+  atomicrmw add ptr %a, i8 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_add_release
-; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 3), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_add(ptr %a, i8 0, i32 3), !dbg
 
-define void @atomic8_sub_release(i8* %a) nounwind uwtable {
+define void @atomic8_sub_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i8* %a, i8 0 release, !dbg !7
+  atomicrmw sub ptr %a, i8 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_sub_release
-; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 3), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_sub(ptr %a, i8 0, i32 3), !dbg
 
-define void @atomic8_and_release(i8* %a) nounwind uwtable {
+define void @atomic8_and_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i8* %a, i8 0 release, !dbg !7
+  atomicrmw and ptr %a, i8 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_and_release
-; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 3), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_and(ptr %a, i8 0, i32 3), !dbg
 
-define void @atomic8_or_release(i8* %a) nounwind uwtable {
+define void @atomic8_or_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i8* %a, i8 0 release, !dbg !7
+  atomicrmw or ptr %a, i8 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_or_release
-; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 3), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_or(ptr %a, i8 0, i32 3), !dbg
 
-define void @atomic8_xor_release(i8* %a) nounwind uwtable {
+define void @atomic8_xor_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i8* %a, i8 0 release, !dbg !7
+  atomicrmw xor ptr %a, i8 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_xor_release
-; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 3), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_xor(ptr %a, i8 0, i32 3), !dbg
 
-define void @atomic8_nand_release(i8* %a) nounwind uwtable {
+define void @atomic8_nand_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i8* %a, i8 0 release, !dbg !7
+  atomicrmw nand ptr %a, i8 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_nand_release
-; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 3), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(ptr %a, i8 0, i32 3), !dbg
 
-define void @atomic8_xchg_acq_rel(i8* %a) nounwind uwtable {
+define void @atomic8_xchg_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i8* %a, i8 0 acq_rel, !dbg !7
+  atomicrmw xchg ptr %a, i8 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_xchg_acq_rel
-; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 4), !dbg
+; CHECK: call i8 @__tsan_atomic8_exchange(ptr %a, i8 0, i32 4), !dbg
 
-define void @atomic8_add_acq_rel(i8* %a) nounwind uwtable {
+define void @atomic8_add_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i8* %a, i8 0 acq_rel, !dbg !7
+  atomicrmw add ptr %a, i8 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_add_acq_rel
-; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 4), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_add(ptr %a, i8 0, i32 4), !dbg
 
-define void @atomic8_sub_acq_rel(i8* %a) nounwind uwtable {
+define void @atomic8_sub_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i8* %a, i8 0 acq_rel, !dbg !7
+  atomicrmw sub ptr %a, i8 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_sub_acq_rel
-; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 4), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_sub(ptr %a, i8 0, i32 4), !dbg
 
-define void @atomic8_and_acq_rel(i8* %a) nounwind uwtable {
+define void @atomic8_and_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i8* %a, i8 0 acq_rel, !dbg !7
+  atomicrmw and ptr %a, i8 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_and_acq_rel
-; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 4), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_and(ptr %a, i8 0, i32 4), !dbg
 
-define void @atomic8_or_acq_rel(i8* %a) nounwind uwtable {
+define void @atomic8_or_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i8* %a, i8 0 acq_rel, !dbg !7
+  atomicrmw or ptr %a, i8 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_or_acq_rel
-; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 4), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_or(ptr %a, i8 0, i32 4), !dbg
 
-define void @atomic8_xor_acq_rel(i8* %a) nounwind uwtable {
+define void @atomic8_xor_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i8* %a, i8 0 acq_rel, !dbg !7
+  atomicrmw xor ptr %a, i8 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_xor_acq_rel
-; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 4), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_xor(ptr %a, i8 0, i32 4), !dbg
 
-define void @atomic8_nand_acq_rel(i8* %a) nounwind uwtable {
+define void @atomic8_nand_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i8* %a, i8 0 acq_rel, !dbg !7
+  atomicrmw nand ptr %a, i8 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_nand_acq_rel
-; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 4), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(ptr %a, i8 0, i32 4), !dbg
 
-define void @atomic8_xchg_seq_cst(i8* %a) nounwind uwtable {
+define void @atomic8_xchg_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i8* %a, i8 0 seq_cst, !dbg !7
+  atomicrmw xchg ptr %a, i8 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_xchg_seq_cst
-; CHECK: call i8 @__tsan_atomic8_exchange(i8* %a, i8 0, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_exchange(ptr %a, i8 0, i32 5), !dbg
 
-define void @atomic8_add_seq_cst(i8* %a) nounwind uwtable {
+define void @atomic8_add_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i8* %a, i8 0 seq_cst, !dbg !7
+  atomicrmw add ptr %a, i8 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_add_seq_cst
-; CHECK: call i8 @__tsan_atomic8_fetch_add(i8* %a, i8 0, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_add(ptr %a, i8 0, i32 5), !dbg
 
-define void @atomic8_sub_seq_cst(i8* %a) nounwind uwtable {
+define void @atomic8_sub_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i8* %a, i8 0 seq_cst, !dbg !7
+  atomicrmw sub ptr %a, i8 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_sub_seq_cst
-; CHECK: call i8 @__tsan_atomic8_fetch_sub(i8* %a, i8 0, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_sub(ptr %a, i8 0, i32 5), !dbg
 
-define void @atomic8_and_seq_cst(i8* %a) nounwind uwtable {
+define void @atomic8_and_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i8* %a, i8 0 seq_cst, !dbg !7
+  atomicrmw and ptr %a, i8 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_and_seq_cst
-; CHECK: call i8 @__tsan_atomic8_fetch_and(i8* %a, i8 0, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_and(ptr %a, i8 0, i32 5), !dbg
 
-define void @atomic8_or_seq_cst(i8* %a) nounwind uwtable {
+define void @atomic8_or_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i8* %a, i8 0 seq_cst, !dbg !7
+  atomicrmw or ptr %a, i8 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_or_seq_cst
-; CHECK: call i8 @__tsan_atomic8_fetch_or(i8* %a, i8 0, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_or(ptr %a, i8 0, i32 5), !dbg
 
-define void @atomic8_xor_seq_cst(i8* %a) nounwind uwtable {
+define void @atomic8_xor_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i8* %a, i8 0 seq_cst, !dbg !7
+  atomicrmw xor ptr %a, i8 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_xor_seq_cst
-; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_xor(ptr %a, i8 0, i32 5), !dbg
 
-define void @atomic8_nand_seq_cst(i8* %a) nounwind uwtable {
+define void @atomic8_nand_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i8* %a, i8 0 seq_cst, !dbg !7
+  atomicrmw nand ptr %a, i8 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_nand_seq_cst
-; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(ptr %a, i8 0, i32 5), !dbg
 
-define void @atomic8_cas_monotonic(i8* %a) nounwind uwtable {
+define void @atomic8_cas_monotonic(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i8* %a, i8 0, i8 1 monotonic monotonic, !dbg !7
-  cmpxchg i8* %a, i8 0, i8 1 monotonic acquire, !dbg !7
-  cmpxchg i8* %a, i8 0, i8 1 monotonic seq_cst, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 monotonic monotonic, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 monotonic acquire, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 monotonic seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_cas_monotonic
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 0, i32 0), !dbg
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 0, i32 2), !dbg
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 0, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 0, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 0, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 0, i32 5), !dbg
 
-define void @atomic8_cas_acquire(i8* %a) nounwind uwtable {
+define void @atomic8_cas_acquire(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i8* %a, i8 0, i8 1 acquire monotonic, !dbg !7
-  cmpxchg i8* %a, i8 0, i8 1 acquire acquire, !dbg !7
-  cmpxchg i8* %a, i8 0, i8 1 acquire seq_cst, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 acquire monotonic, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 acquire acquire, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 acquire seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_cas_acquire
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 2, i32 0), !dbg
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 2, i32 2), !dbg
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 2, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 2, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 2, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 2, i32 5), !dbg
 
-define void @atomic8_cas_release(i8* %a) nounwind uwtable {
+define void @atomic8_cas_release(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i8* %a, i8 0, i8 1 release monotonic, !dbg !7
-  cmpxchg i8* %a, i8 0, i8 1 release acquire, !dbg !7
-  cmpxchg i8* %a, i8 0, i8 1 release seq_cst, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 release monotonic, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 release acquire, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 release seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_cas_release
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 3, i32 0), !dbg
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 3, i32 2), !dbg
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 3, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 3, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 3, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 3, i32 5), !dbg
 
-define void @atomic8_cas_acq_rel(i8* %a) nounwind uwtable {
+define void @atomic8_cas_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i8* %a, i8 0, i8 1 acq_rel monotonic, !dbg !7
-  cmpxchg i8* %a, i8 0, i8 1 acq_rel acquire, !dbg !7
-  cmpxchg i8* %a, i8 0, i8 1 acq_rel seq_cst, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 acq_rel monotonic, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 acq_rel acquire, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 acq_rel seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_cas_acq_rel
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 4, i32 0), !dbg
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 4, i32 2), !dbg
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 4, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 4, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 4, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 4, i32 5), !dbg
 
-define void @atomic8_cas_seq_cst(i8* %a) nounwind uwtable {
+define void @atomic8_cas_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i8* %a, i8 0, i8 1 seq_cst monotonic, !dbg !7
-  cmpxchg i8* %a, i8 0, i8 1 seq_cst acquire, !dbg !7
-  cmpxchg i8* %a, i8 0, i8 1 seq_cst seq_cst, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 seq_cst monotonic, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 seq_cst acquire, !dbg !7
+  cmpxchg ptr %a, i8 0, i8 1 seq_cst seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_cas_seq_cst
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 5, i32 0), !dbg
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 5, i32 2), !dbg
-; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 5, i32 5), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 5, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 5, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(ptr %a, i8 0, i8 1, i32 5, i32 5), !dbg
 
-define i16 @atomic16_load_unordered(i16* %a) nounwind uwtable {
+define i16 @atomic16_load_unordered(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i16, i16* %a unordered, align 2, !dbg !7
+  %0 = load atomic i16, ptr %a unordered, align 2, !dbg !7
   ret i16 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic16_load_unordered
-; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_load(ptr %a, i32 0), !dbg
 
-define i16 @atomic16_load_monotonic(i16* %a) nounwind uwtable {
+define i16 @atomic16_load_monotonic(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i16, i16* %a monotonic, align 2, !dbg !7
+  %0 = load atomic i16, ptr %a monotonic, align 2, !dbg !7
   ret i16 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic16_load_monotonic
-; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_load(ptr %a, i32 0), !dbg
 
-define i16 @atomic16_load_acquire(i16* %a) nounwind uwtable {
+define i16 @atomic16_load_acquire(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i16, i16* %a acquire, align 2, !dbg !7
+  %0 = load atomic i16, ptr %a acquire, align 2, !dbg !7
   ret i16 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic16_load_acquire
-; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_load(ptr %a, i32 2), !dbg
 
-define i16 @atomic16_load_seq_cst(i16* %a) nounwind uwtable {
+define i16 @atomic16_load_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i16, i16* %a seq_cst, align 2, !dbg !7
+  %0 = load atomic i16, ptr %a seq_cst, align 2, !dbg !7
   ret i16 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic16_load_seq_cst
-; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_load(ptr %a, i32 5), !dbg
 
-define void @atomic16_store_unordered(i16* %a) nounwind uwtable {
+define void @atomic16_store_unordered(ptr %a) nounwind uwtable {
 entry:
-  store atomic i16 0, i16* %a unordered, align 2, !dbg !7
+  store atomic i16 0, ptr %a unordered, align 2, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_store_unordered
-; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 0), !dbg
+; CHECK: call void @__tsan_atomic16_store(ptr %a, i16 0, i32 0), !dbg
 
-define void @atomic16_store_monotonic(i16* %a) nounwind uwtable {
+define void @atomic16_store_monotonic(ptr %a) nounwind uwtable {
 entry:
-  store atomic i16 0, i16* %a monotonic, align 2, !dbg !7
+  store atomic i16 0, ptr %a monotonic, align 2, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_store_monotonic
-; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 0), !dbg
+; CHECK: call void @__tsan_atomic16_store(ptr %a, i16 0, i32 0), !dbg
 
-define void @atomic16_store_release(i16* %a) nounwind uwtable {
+define void @atomic16_store_release(ptr %a) nounwind uwtable {
 entry:
-  store atomic i16 0, i16* %a release, align 2, !dbg !7
+  store atomic i16 0, ptr %a release, align 2, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_store_release
-; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 3), !dbg
+; CHECK: call void @__tsan_atomic16_store(ptr %a, i16 0, i32 3), !dbg
 
-define void @atomic16_store_seq_cst(i16* %a) nounwind uwtable {
+define void @atomic16_store_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  store atomic i16 0, i16* %a seq_cst, align 2, !dbg !7
+  store atomic i16 0, ptr %a seq_cst, align 2, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_store_seq_cst
-; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 5), !dbg
+; CHECK: call void @__tsan_atomic16_store(ptr %a, i16 0, i32 5), !dbg
 
-define void @atomic16_xchg_monotonic(i16* %a) nounwind uwtable {
+define void @atomic16_xchg_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i16* %a, i16 0 monotonic, !dbg !7
+  atomicrmw xchg ptr %a, i16 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_xchg_monotonic
-; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_exchange(ptr %a, i16 0, i32 0), !dbg
 
-define void @atomic16_add_monotonic(i16* %a) nounwind uwtable {
+define void @atomic16_add_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i16* %a, i16 0 monotonic, !dbg !7
+  atomicrmw add ptr %a, i16 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_add_monotonic
-; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_add(ptr %a, i16 0, i32 0), !dbg
 
-define void @atomic16_sub_monotonic(i16* %a) nounwind uwtable {
+define void @atomic16_sub_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i16* %a, i16 0 monotonic, !dbg !7
+  atomicrmw sub ptr %a, i16 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_sub_monotonic
-; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_sub(ptr %a, i16 0, i32 0), !dbg
 
-define void @atomic16_and_monotonic(i16* %a) nounwind uwtable {
+define void @atomic16_and_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i16* %a, i16 0 monotonic, !dbg !7
+  atomicrmw and ptr %a, i16 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_and_monotonic
-; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_and(ptr %a, i16 0, i32 0), !dbg
 
-define void @atomic16_or_monotonic(i16* %a) nounwind uwtable {
+define void @atomic16_or_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i16* %a, i16 0 monotonic, !dbg !7
+  atomicrmw or ptr %a, i16 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_or_monotonic
-; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_or(ptr %a, i16 0, i32 0), !dbg
 
-define void @atomic16_xor_monotonic(i16* %a) nounwind uwtable {
+define void @atomic16_xor_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i16* %a, i16 0 monotonic, !dbg !7
+  atomicrmw xor ptr %a, i16 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_xor_monotonic
-; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_xor(ptr %a, i16 0, i32 0), !dbg
 
-define void @atomic16_nand_monotonic(i16* %a) nounwind uwtable {
+define void @atomic16_nand_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i16* %a, i16 0 monotonic, !dbg !7
+  atomicrmw nand ptr %a, i16 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_nand_monotonic
-; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(ptr %a, i16 0, i32 0), !dbg
 
-define void @atomic16_xchg_acquire(i16* %a) nounwind uwtable {
+define void @atomic16_xchg_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i16* %a, i16 0 acquire, !dbg !7
+  atomicrmw xchg ptr %a, i16 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_xchg_acquire
-; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_exchange(ptr %a, i16 0, i32 2), !dbg
 
-define void @atomic16_add_acquire(i16* %a) nounwind uwtable {
+define void @atomic16_add_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i16* %a, i16 0 acquire, !dbg !7
+  atomicrmw add ptr %a, i16 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_add_acquire
-; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_add(ptr %a, i16 0, i32 2), !dbg
 
-define void @atomic16_sub_acquire(i16* %a) nounwind uwtable {
+define void @atomic16_sub_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i16* %a, i16 0 acquire, !dbg !7
+  atomicrmw sub ptr %a, i16 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_sub_acquire
-; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_sub(ptr %a, i16 0, i32 2), !dbg
 
-define void @atomic16_and_acquire(i16* %a) nounwind uwtable {
+define void @atomic16_and_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i16* %a, i16 0 acquire, !dbg !7
+  atomicrmw and ptr %a, i16 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_and_acquire
-; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_and(ptr %a, i16 0, i32 2), !dbg
 
-define void @atomic16_or_acquire(i16* %a) nounwind uwtable {
+define void @atomic16_or_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i16* %a, i16 0 acquire, !dbg !7
+  atomicrmw or ptr %a, i16 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_or_acquire
-; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_or(ptr %a, i16 0, i32 2), !dbg
 
-define void @atomic16_xor_acquire(i16* %a) nounwind uwtable {
+define void @atomic16_xor_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i16* %a, i16 0 acquire, !dbg !7
+  atomicrmw xor ptr %a, i16 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_xor_acquire
-; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_xor(ptr %a, i16 0, i32 2), !dbg
 
-define void @atomic16_nand_acquire(i16* %a) nounwind uwtable {
+define void @atomic16_nand_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i16* %a, i16 0 acquire, !dbg !7
+  atomicrmw nand ptr %a, i16 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_nand_acquire
-; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(ptr %a, i16 0, i32 2), !dbg
 
-define void @atomic16_xchg_release(i16* %a) nounwind uwtable {
+define void @atomic16_xchg_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i16* %a, i16 0 release, !dbg !7
+  atomicrmw xchg ptr %a, i16 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_xchg_release
-; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 3), !dbg
+; CHECK: call i16 @__tsan_atomic16_exchange(ptr %a, i16 0, i32 3), !dbg
 
-define void @atomic16_add_release(i16* %a) nounwind uwtable {
+define void @atomic16_add_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i16* %a, i16 0 release, !dbg !7
+  atomicrmw add ptr %a, i16 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_add_release
-; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 3), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_add(ptr %a, i16 0, i32 3), !dbg
 
-define void @atomic16_sub_release(i16* %a) nounwind uwtable {
+define void @atomic16_sub_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i16* %a, i16 0 release, !dbg !7
+  atomicrmw sub ptr %a, i16 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_sub_release
-; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 3), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_sub(ptr %a, i16 0, i32 3), !dbg
 
-define void @atomic16_and_release(i16* %a) nounwind uwtable {
+define void @atomic16_and_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i16* %a, i16 0 release, !dbg !7
+  atomicrmw and ptr %a, i16 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_and_release
-; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 3), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_and(ptr %a, i16 0, i32 3), !dbg
 
-define void @atomic16_or_release(i16* %a) nounwind uwtable {
+define void @atomic16_or_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i16* %a, i16 0 release, !dbg !7
+  atomicrmw or ptr %a, i16 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_or_release
-; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 3), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_or(ptr %a, i16 0, i32 3), !dbg
 
-define void @atomic16_xor_release(i16* %a) nounwind uwtable {
+define void @atomic16_xor_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i16* %a, i16 0 release, !dbg !7
+  atomicrmw xor ptr %a, i16 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_xor_release
-; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 3), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_xor(ptr %a, i16 0, i32 3), !dbg
 
-define void @atomic16_nand_release(i16* %a) nounwind uwtable {
+define void @atomic16_nand_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i16* %a, i16 0 release, !dbg !7
+  atomicrmw nand ptr %a, i16 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_nand_release
-; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 3), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(ptr %a, i16 0, i32 3), !dbg
 
-define void @atomic16_xchg_acq_rel(i16* %a) nounwind uwtable {
+define void @atomic16_xchg_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i16* %a, i16 0 acq_rel, !dbg !7
+  atomicrmw xchg ptr %a, i16 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_xchg_acq_rel
-; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 4), !dbg
+; CHECK: call i16 @__tsan_atomic16_exchange(ptr %a, i16 0, i32 4), !dbg
 
-define void @atomic16_add_acq_rel(i16* %a) nounwind uwtable {
+define void @atomic16_add_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i16* %a, i16 0 acq_rel, !dbg !7
+  atomicrmw add ptr %a, i16 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_add_acq_rel
-; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 4), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_add(ptr %a, i16 0, i32 4), !dbg
 
-define void @atomic16_sub_acq_rel(i16* %a) nounwind uwtable {
+define void @atomic16_sub_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i16* %a, i16 0 acq_rel, !dbg !7
+  atomicrmw sub ptr %a, i16 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_sub_acq_rel
-; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 4), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_sub(ptr %a, i16 0, i32 4), !dbg
 
-define void @atomic16_and_acq_rel(i16* %a) nounwind uwtable {
+define void @atomic16_and_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i16* %a, i16 0 acq_rel, !dbg !7
+  atomicrmw and ptr %a, i16 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_and_acq_rel
-; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 4), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_and(ptr %a, i16 0, i32 4), !dbg
 
-define void @atomic16_or_acq_rel(i16* %a) nounwind uwtable {
+define void @atomic16_or_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i16* %a, i16 0 acq_rel, !dbg !7
+  atomicrmw or ptr %a, i16 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_or_acq_rel
-; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 4), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_or(ptr %a, i16 0, i32 4), !dbg
 
-define void @atomic16_xor_acq_rel(i16* %a) nounwind uwtable {
+define void @atomic16_xor_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i16* %a, i16 0 acq_rel, !dbg !7
+  atomicrmw xor ptr %a, i16 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_xor_acq_rel
-; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 4), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_xor(ptr %a, i16 0, i32 4), !dbg
 
-define void @atomic16_nand_acq_rel(i16* %a) nounwind uwtable {
+define void @atomic16_nand_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i16* %a, i16 0 acq_rel, !dbg !7
+  atomicrmw nand ptr %a, i16 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_nand_acq_rel
-; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 4), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(ptr %a, i16 0, i32 4), !dbg
 
-define void @atomic16_xchg_seq_cst(i16* %a) nounwind uwtable {
+define void @atomic16_xchg_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i16* %a, i16 0 seq_cst, !dbg !7
+  atomicrmw xchg ptr %a, i16 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_xchg_seq_cst
-; CHECK: call i16 @__tsan_atomic16_exchange(i16* %a, i16 0, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_exchange(ptr %a, i16 0, i32 5), !dbg
 
-define void @atomic16_add_seq_cst(i16* %a) nounwind uwtable {
+define void @atomic16_add_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i16* %a, i16 0 seq_cst, !dbg !7
+  atomicrmw add ptr %a, i16 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_add_seq_cst
-; CHECK: call i16 @__tsan_atomic16_fetch_add(i16* %a, i16 0, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_add(ptr %a, i16 0, i32 5), !dbg
 
-define void @atomic16_sub_seq_cst(i16* %a) nounwind uwtable {
+define void @atomic16_sub_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i16* %a, i16 0 seq_cst, !dbg !7
+  atomicrmw sub ptr %a, i16 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_sub_seq_cst
-; CHECK: call i16 @__tsan_atomic16_fetch_sub(i16* %a, i16 0, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_sub(ptr %a, i16 0, i32 5), !dbg
 
-define void @atomic16_and_seq_cst(i16* %a) nounwind uwtable {
+define void @atomic16_and_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i16* %a, i16 0 seq_cst, !dbg !7
+  atomicrmw and ptr %a, i16 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_and_seq_cst
-; CHECK: call i16 @__tsan_atomic16_fetch_and(i16* %a, i16 0, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_and(ptr %a, i16 0, i32 5), !dbg
 
-define void @atomic16_or_seq_cst(i16* %a) nounwind uwtable {
+define void @atomic16_or_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i16* %a, i16 0 seq_cst, !dbg !7
+  atomicrmw or ptr %a, i16 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_or_seq_cst
-; CHECK: call i16 @__tsan_atomic16_fetch_or(i16* %a, i16 0, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_or(ptr %a, i16 0, i32 5), !dbg
 
-define void @atomic16_xor_seq_cst(i16* %a) nounwind uwtable {
+define void @atomic16_xor_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i16* %a, i16 0 seq_cst, !dbg !7
+  atomicrmw xor ptr %a, i16 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_xor_seq_cst
-; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_xor(ptr %a, i16 0, i32 5), !dbg
 
-define void @atomic16_nand_seq_cst(i16* %a) nounwind uwtable {
+define void @atomic16_nand_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i16* %a, i16 0 seq_cst, !dbg !7
+  atomicrmw nand ptr %a, i16 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_nand_seq_cst
-; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(ptr %a, i16 0, i32 5), !dbg
 
-define void @atomic16_cas_monotonic(i16* %a) nounwind uwtable {
+define void @atomic16_cas_monotonic(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i16* %a, i16 0, i16 1 monotonic monotonic, !dbg !7
-  cmpxchg i16* %a, i16 0, i16 1 monotonic acquire, !dbg !7
-  cmpxchg i16* %a, i16 0, i16 1 monotonic seq_cst, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 monotonic monotonic, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 monotonic acquire, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 monotonic seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_cas_monotonic
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 0, i32 0), !dbg
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 0, i32 2), !dbg
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 0, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 0, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 0, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 0, i32 5), !dbg
 
-define void @atomic16_cas_acquire(i16* %a) nounwind uwtable {
+define void @atomic16_cas_acquire(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i16* %a, i16 0, i16 1 acquire monotonic, !dbg !7
-  cmpxchg i16* %a, i16 0, i16 1 acquire acquire, !dbg !7
-  cmpxchg i16* %a, i16 0, i16 1 acquire seq_cst, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 acquire monotonic, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 acquire acquire, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 acquire seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_cas_acquire
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 2, i32 0), !dbg
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 2, i32 2), !dbg
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 2, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 2, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 2, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 2, i32 5), !dbg
 
-define void @atomic16_cas_release(i16* %a) nounwind uwtable {
+define void @atomic16_cas_release(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i16* %a, i16 0, i16 1 release monotonic, !dbg !7
-  cmpxchg i16* %a, i16 0, i16 1 release acquire, !dbg !7
-  cmpxchg i16* %a, i16 0, i16 1 release seq_cst, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 release monotonic, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 release acquire, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 release seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_cas_release
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 3, i32 0), !dbg
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 3, i32 2), !dbg
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 3, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 3, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 3, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 3, i32 5), !dbg
 
-define void @atomic16_cas_acq_rel(i16* %a) nounwind uwtable {
+define void @atomic16_cas_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i16* %a, i16 0, i16 1 acq_rel monotonic, !dbg !7
-  cmpxchg i16* %a, i16 0, i16 1 acq_rel acquire, !dbg !7
-  cmpxchg i16* %a, i16 0, i16 1 acq_rel seq_cst, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 acq_rel monotonic, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 acq_rel acquire, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 acq_rel seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_cas_acq_rel
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 4, i32 0), !dbg
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 4, i32 2), !dbg
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 4, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 4, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 4, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 4, i32 5), !dbg
 
-define void @atomic16_cas_seq_cst(i16* %a) nounwind uwtable {
+define void @atomic16_cas_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i16* %a, i16 0, i16 1 seq_cst monotonic, !dbg !7
-  cmpxchg i16* %a, i16 0, i16 1 seq_cst acquire, !dbg !7
-  cmpxchg i16* %a, i16 0, i16 1 seq_cst seq_cst, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 seq_cst monotonic, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 seq_cst acquire, !dbg !7
+  cmpxchg ptr %a, i16 0, i16 1 seq_cst seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_cas_seq_cst
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 5, i32 0), !dbg
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 5, i32 2), !dbg
-; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 5, i32 5), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 5, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 5, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(ptr %a, i16 0, i16 1, i32 5, i32 5), !dbg
 
-define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable {
+define i32 @atomic32_load_unordered(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i32, i32* %a unordered, align 4, !dbg !7
+  %0 = load atomic i32, ptr %a unordered, align 4, !dbg !7
   ret i32 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic32_load_unordered
-; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_load(ptr %a, i32 0), !dbg
 
-define i32 @atomic32_load_monotonic(i32* %a) nounwind uwtable {
+define i32 @atomic32_load_monotonic(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i32, i32* %a monotonic, align 4, !dbg !7
+  %0 = load atomic i32, ptr %a monotonic, align 4, !dbg !7
   ret i32 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic32_load_monotonic
-; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_load(ptr %a, i32 0), !dbg
 
-define i32 @atomic32_load_acquire(i32* %a) nounwind uwtable {
+define i32 @atomic32_load_acquire(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i32, i32* %a acquire, align 4, !dbg !7
+  %0 = load atomic i32, ptr %a acquire, align 4, !dbg !7
   ret i32 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic32_load_acquire
-; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_load(ptr %a, i32 2), !dbg
 
-define i32 @atomic32_load_seq_cst(i32* %a) nounwind uwtable {
+define i32 @atomic32_load_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i32, i32* %a seq_cst, align 4, !dbg !7
+  %0 = load atomic i32, ptr %a seq_cst, align 4, !dbg !7
   ret i32 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic32_load_seq_cst
-; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_load(ptr %a, i32 5), !dbg
 
-define void @atomic32_store_unordered(i32* %a) nounwind uwtable {
+define void @atomic32_store_unordered(ptr %a) nounwind uwtable {
 entry:
-  store atomic i32 0, i32* %a unordered, align 4, !dbg !7
+  store atomic i32 0, ptr %a unordered, align 4, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_store_unordered
-; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 0), !dbg
+; CHECK: call void @__tsan_atomic32_store(ptr %a, i32 0, i32 0), !dbg
 
-define void @atomic32_store_monotonic(i32* %a) nounwind uwtable {
+define void @atomic32_store_monotonic(ptr %a) nounwind uwtable {
 entry:
-  store atomic i32 0, i32* %a monotonic, align 4, !dbg !7
+  store atomic i32 0, ptr %a monotonic, align 4, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_store_monotonic
-; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 0), !dbg
+; CHECK: call void @__tsan_atomic32_store(ptr %a, i32 0, i32 0), !dbg
 
-define void @atomic32_store_release(i32* %a) nounwind uwtable {
+define void @atomic32_store_release(ptr %a) nounwind uwtable {
 entry:
-  store atomic i32 0, i32* %a release, align 4, !dbg !7
+  store atomic i32 0, ptr %a release, align 4, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_store_release
-; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 3), !dbg
+; CHECK: call void @__tsan_atomic32_store(ptr %a, i32 0, i32 3), !dbg
 
-define void @atomic32_store_seq_cst(i32* %a) nounwind uwtable {
+define void @atomic32_store_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  store atomic i32 0, i32* %a seq_cst, align 4, !dbg !7
+  store atomic i32 0, ptr %a seq_cst, align 4, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_store_seq_cst
-; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 5), !dbg
+; CHECK: call void @__tsan_atomic32_store(ptr %a, i32 0, i32 5), !dbg
 
-define void @atomic32_xchg_monotonic(i32* %a) nounwind uwtable {
+define void @atomic32_xchg_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i32* %a, i32 0 monotonic, !dbg !7
+  atomicrmw xchg ptr %a, i32 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_xchg_monotonic
-; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_exchange(ptr %a, i32 0, i32 0), !dbg
 
-define void @atomic32_add_monotonic(i32* %a) nounwind uwtable {
+define void @atomic32_add_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i32* %a, i32 0 monotonic, !dbg !7
+  atomicrmw add ptr %a, i32 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_add_monotonic
-; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_add(ptr %a, i32 0, i32 0), !dbg
 
-define void @atomic32_sub_monotonic(i32* %a) nounwind uwtable {
+define void @atomic32_sub_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i32* %a, i32 0 monotonic, !dbg !7
+  atomicrmw sub ptr %a, i32 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_sub_monotonic
-; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_sub(ptr %a, i32 0, i32 0), !dbg
 
-define void @atomic32_and_monotonic(i32* %a) nounwind uwtable {
+define void @atomic32_and_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i32* %a, i32 0 monotonic, !dbg !7
+  atomicrmw and ptr %a, i32 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_and_monotonic
-; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_and(ptr %a, i32 0, i32 0), !dbg
 
-define void @atomic32_or_monotonic(i32* %a) nounwind uwtable {
+define void @atomic32_or_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i32* %a, i32 0 monotonic, !dbg !7
+  atomicrmw or ptr %a, i32 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_or_monotonic
-; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_or(ptr %a, i32 0, i32 0), !dbg
 
-define void @atomic32_xor_monotonic(i32* %a) nounwind uwtable {
+define void @atomic32_xor_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i32* %a, i32 0 monotonic, !dbg !7
+  atomicrmw xor ptr %a, i32 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_xor_monotonic
-; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_xor(ptr %a, i32 0, i32 0), !dbg
 
-define void @atomic32_nand_monotonic(i32* %a) nounwind uwtable {
+define void @atomic32_nand_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i32* %a, i32 0 monotonic, !dbg !7
+  atomicrmw nand ptr %a, i32 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_nand_monotonic
-; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(ptr %a, i32 0, i32 0), !dbg
 
-define void @atomic32_xchg_acquire(i32* %a) nounwind uwtable {
+define void @atomic32_xchg_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i32* %a, i32 0 acquire, !dbg !7
+  atomicrmw xchg ptr %a, i32 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_xchg_acquire
-; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_exchange(ptr %a, i32 0, i32 2), !dbg
 
-define void @atomic32_add_acquire(i32* %a) nounwind uwtable {
+define void @atomic32_add_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i32* %a, i32 0 acquire, !dbg !7
+  atomicrmw add ptr %a, i32 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_add_acquire
-; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_add(ptr %a, i32 0, i32 2), !dbg
 
-define void @atomic32_sub_acquire(i32* %a) nounwind uwtable {
+define void @atomic32_sub_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i32* %a, i32 0 acquire, !dbg !7
+  atomicrmw sub ptr %a, i32 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_sub_acquire
-; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_sub(ptr %a, i32 0, i32 2), !dbg
 
-define void @atomic32_and_acquire(i32* %a) nounwind uwtable {
+define void @atomic32_and_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i32* %a, i32 0 acquire, !dbg !7
+  atomicrmw and ptr %a, i32 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_and_acquire
-; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_and(ptr %a, i32 0, i32 2), !dbg
 
-define void @atomic32_or_acquire(i32* %a) nounwind uwtable {
+define void @atomic32_or_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i32* %a, i32 0 acquire, !dbg !7
+  atomicrmw or ptr %a, i32 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_or_acquire
-; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_or(ptr %a, i32 0, i32 2), !dbg
 
-define void @atomic32_xor_acquire(i32* %a) nounwind uwtable {
+define void @atomic32_xor_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i32* %a, i32 0 acquire, !dbg !7
+  atomicrmw xor ptr %a, i32 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_xor_acquire
-; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_xor(ptr %a, i32 0, i32 2), !dbg
 
-define void @atomic32_nand_acquire(i32* %a) nounwind uwtable {
+define void @atomic32_nand_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i32* %a, i32 0 acquire, !dbg !7
+  atomicrmw nand ptr %a, i32 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_nand_acquire
-; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(ptr %a, i32 0, i32 2), !dbg
 
-define void @atomic32_xchg_release(i32* %a) nounwind uwtable {
+define void @atomic32_xchg_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i32* %a, i32 0 release, !dbg !7
+  atomicrmw xchg ptr %a, i32 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_xchg_release
-; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 3), !dbg
+; CHECK: call i32 @__tsan_atomic32_exchange(ptr %a, i32 0, i32 3), !dbg
 
-define void @atomic32_add_release(i32* %a) nounwind uwtable {
+define void @atomic32_add_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i32* %a, i32 0 release, !dbg !7
+  atomicrmw add ptr %a, i32 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_add_release
-; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 3), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_add(ptr %a, i32 0, i32 3), !dbg
 
-define void @atomic32_sub_release(i32* %a) nounwind uwtable {
+define void @atomic32_sub_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i32* %a, i32 0 release, !dbg !7
+  atomicrmw sub ptr %a, i32 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_sub_release
-; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 3), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_sub(ptr %a, i32 0, i32 3), !dbg
 
-define void @atomic32_and_release(i32* %a) nounwind uwtable {
+define void @atomic32_and_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i32* %a, i32 0 release, !dbg !7
+  atomicrmw and ptr %a, i32 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_and_release
-; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 3), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_and(ptr %a, i32 0, i32 3), !dbg
 
-define void @atomic32_or_release(i32* %a) nounwind uwtable {
+define void @atomic32_or_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i32* %a, i32 0 release, !dbg !7
+  atomicrmw or ptr %a, i32 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_or_release
-; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 3), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_or(ptr %a, i32 0, i32 3), !dbg
 
-define void @atomic32_xor_release(i32* %a) nounwind uwtable {
+define void @atomic32_xor_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i32* %a, i32 0 release, !dbg !7
+  atomicrmw xor ptr %a, i32 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_xor_release
-; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 3), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_xor(ptr %a, i32 0, i32 3), !dbg
 
-define void @atomic32_nand_release(i32* %a) nounwind uwtable {
+define void @atomic32_nand_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i32* %a, i32 0 release, !dbg !7
+  atomicrmw nand ptr %a, i32 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_nand_release
-; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 3), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(ptr %a, i32 0, i32 3), !dbg
 
-define void @atomic32_xchg_acq_rel(i32* %a) nounwind uwtable {
+define void @atomic32_xchg_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i32* %a, i32 0 acq_rel, !dbg !7
+  atomicrmw xchg ptr %a, i32 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_xchg_acq_rel
-; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 4), !dbg
+; CHECK: call i32 @__tsan_atomic32_exchange(ptr %a, i32 0, i32 4), !dbg
 
-define void @atomic32_add_acq_rel(i32* %a) nounwind uwtable {
+define void @atomic32_add_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i32* %a, i32 0 acq_rel, !dbg !7
+  atomicrmw add ptr %a, i32 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_add_acq_rel
-; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 4), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_add(ptr %a, i32 0, i32 4), !dbg
 
-define void @atomic32_sub_acq_rel(i32* %a) nounwind uwtable {
+define void @atomic32_sub_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i32* %a, i32 0 acq_rel, !dbg !7
+  atomicrmw sub ptr %a, i32 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_sub_acq_rel
-; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 4), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_sub(ptr %a, i32 0, i32 4), !dbg
 
-define void @atomic32_and_acq_rel(i32* %a) nounwind uwtable {
+define void @atomic32_and_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i32* %a, i32 0 acq_rel, !dbg !7
+  atomicrmw and ptr %a, i32 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_and_acq_rel
-; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 4), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_and(ptr %a, i32 0, i32 4), !dbg
 
-define void @atomic32_or_acq_rel(i32* %a) nounwind uwtable {
+define void @atomic32_or_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i32* %a, i32 0 acq_rel, !dbg !7
+  atomicrmw or ptr %a, i32 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_or_acq_rel
-; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 4), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_or(ptr %a, i32 0, i32 4), !dbg
 
-define void @atomic32_xor_acq_rel(i32* %a) nounwind uwtable {
+define void @atomic32_xor_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i32* %a, i32 0 acq_rel, !dbg !7
+  atomicrmw xor ptr %a, i32 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_xor_acq_rel
-; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 4), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_xor(ptr %a, i32 0, i32 4), !dbg
 
-define void @atomic32_nand_acq_rel(i32* %a) nounwind uwtable {
+define void @atomic32_nand_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i32* %a, i32 0 acq_rel, !dbg !7
+  atomicrmw nand ptr %a, i32 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_nand_acq_rel
-; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 4), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(ptr %a, i32 0, i32 4), !dbg
 
-define void @atomic32_xchg_seq_cst(i32* %a) nounwind uwtable {
+define void @atomic32_xchg_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i32* %a, i32 0 seq_cst, !dbg !7
+  atomicrmw xchg ptr %a, i32 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_xchg_seq_cst
-; CHECK: call i32 @__tsan_atomic32_exchange(i32* %a, i32 0, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_exchange(ptr %a, i32 0, i32 5), !dbg
 
-define void @atomic32_add_seq_cst(i32* %a) nounwind uwtable {
+define void @atomic32_add_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i32* %a, i32 0 seq_cst, !dbg !7
+  atomicrmw add ptr %a, i32 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_add_seq_cst
-; CHECK: call i32 @__tsan_atomic32_fetch_add(i32* %a, i32 0, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_add(ptr %a, i32 0, i32 5), !dbg
 
-define void @atomic32_sub_seq_cst(i32* %a) nounwind uwtable {
+define void @atomic32_sub_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i32* %a, i32 0 seq_cst, !dbg !7
+  atomicrmw sub ptr %a, i32 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_sub_seq_cst
-; CHECK: call i32 @__tsan_atomic32_fetch_sub(i32* %a, i32 0, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_sub(ptr %a, i32 0, i32 5), !dbg
 
-define void @atomic32_and_seq_cst(i32* %a) nounwind uwtable {
+define void @atomic32_and_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i32* %a, i32 0 seq_cst, !dbg !7
+  atomicrmw and ptr %a, i32 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_and_seq_cst
-; CHECK: call i32 @__tsan_atomic32_fetch_and(i32* %a, i32 0, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_and(ptr %a, i32 0, i32 5), !dbg
 
-define void @atomic32_or_seq_cst(i32* %a) nounwind uwtable {
+define void @atomic32_or_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i32* %a, i32 0 seq_cst, !dbg !7
+  atomicrmw or ptr %a, i32 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_or_seq_cst
-; CHECK: call i32 @__tsan_atomic32_fetch_or(i32* %a, i32 0, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_or(ptr %a, i32 0, i32 5), !dbg
 
-define void @atomic32_xor_seq_cst(i32* %a) nounwind uwtable {
+define void @atomic32_xor_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i32* %a, i32 0 seq_cst, !dbg !7
+  atomicrmw xor ptr %a, i32 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_xor_seq_cst
-; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_xor(ptr %a, i32 0, i32 5), !dbg
 
-define void @atomic32_nand_seq_cst(i32* %a) nounwind uwtable {
+define void @atomic32_nand_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i32* %a, i32 0 seq_cst, !dbg !7
+  atomicrmw nand ptr %a, i32 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_nand_seq_cst
-; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(ptr %a, i32 0, i32 5), !dbg
 
-define void @atomic32_cas_monotonic(i32* %a) nounwind uwtable {
+define void @atomic32_cas_monotonic(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic, !dbg !7
-  cmpxchg i32* %a, i32 0, i32 1 monotonic acquire, !dbg !7
-  cmpxchg i32* %a, i32 0, i32 1 monotonic seq_cst, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 monotonic monotonic, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 monotonic acquire, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 monotonic seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_cas_monotonic
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 0, i32 0), !dbg
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 0, i32 2), !dbg
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 0, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 0, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 0, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 0, i32 5), !dbg
 
-define void @atomic32_cas_acquire(i32* %a) nounwind uwtable {
+define void @atomic32_cas_acquire(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i32* %a, i32 0, i32 1 acquire monotonic, !dbg !7
-  cmpxchg i32* %a, i32 0, i32 1 acquire acquire, !dbg !7
-  cmpxchg i32* %a, i32 0, i32 1 acquire seq_cst, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 acquire monotonic, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 acquire acquire, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 acquire seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_cas_acquire
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 2, i32 0), !dbg
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 2, i32 2), !dbg
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 2, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 2, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 2, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 2, i32 5), !dbg
 
-define void @atomic32_cas_release(i32* %a) nounwind uwtable {
+define void @atomic32_cas_release(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i32* %a, i32 0, i32 1 release monotonic, !dbg !7
-  cmpxchg i32* %a, i32 0, i32 1 release acquire, !dbg !7
-  cmpxchg i32* %a, i32 0, i32 1 release seq_cst, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 release monotonic, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 release acquire, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 release seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_cas_release
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 3, i32 0), !dbg
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 3, i32 2), !dbg
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 3, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 3, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 3, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 3, i32 5), !dbg
 
-define void @atomic32_cas_acq_rel(i32* %a) nounwind uwtable {
+define void @atomic32_cas_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i32* %a, i32 0, i32 1 acq_rel monotonic, !dbg !7
-  cmpxchg i32* %a, i32 0, i32 1 acq_rel acquire, !dbg !7
-  cmpxchg i32* %a, i32 0, i32 1 acq_rel seq_cst, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 acq_rel monotonic, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 acq_rel acquire, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 acq_rel seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_cas_acq_rel
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 4, i32 0), !dbg
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 4, i32 2), !dbg
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 4, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 4, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 4, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 4, i32 5), !dbg
 
-define void @atomic32_cas_seq_cst(i32* %a) nounwind uwtable {
+define void @atomic32_cas_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i32* %a, i32 0, i32 1 seq_cst monotonic, !dbg !7
-  cmpxchg i32* %a, i32 0, i32 1 seq_cst acquire, !dbg !7
-  cmpxchg i32* %a, i32 0, i32 1 seq_cst seq_cst, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 seq_cst monotonic, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 seq_cst acquire, !dbg !7
+  cmpxchg ptr %a, i32 0, i32 1 seq_cst seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_cas_seq_cst
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 5, i32 0), !dbg
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 5, i32 2), !dbg
-; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 5, i32 5), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 5, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 5, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(ptr %a, i32 0, i32 1, i32 5, i32 5), !dbg
 
-define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable {
+define i64 @atomic64_load_unordered(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i64, i64* %a unordered, align 8, !dbg !7
+  %0 = load atomic i64, ptr %a unordered, align 8, !dbg !7
   ret i64 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic64_load_unordered
-; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_load(ptr %a, i32 0), !dbg
 
-define i64 @atomic64_load_monotonic(i64* %a) nounwind uwtable {
+define i64 @atomic64_load_monotonic(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i64, i64* %a monotonic, align 8, !dbg !7
+  %0 = load atomic i64, ptr %a monotonic, align 8, !dbg !7
   ret i64 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic64_load_monotonic
-; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_load(ptr %a, i32 0), !dbg
 
-define i64 @atomic64_load_acquire(i64* %a) nounwind uwtable {
+define i64 @atomic64_load_acquire(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i64, i64* %a acquire, align 8, !dbg !7
+  %0 = load atomic i64, ptr %a acquire, align 8, !dbg !7
   ret i64 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic64_load_acquire
-; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_load(ptr %a, i32 2), !dbg
 
-define i64 @atomic64_load_seq_cst(i64* %a) nounwind uwtable {
+define i64 @atomic64_load_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i64, i64* %a seq_cst, align 8, !dbg !7
+  %0 = load atomic i64, ptr %a seq_cst, align 8, !dbg !7
   ret i64 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic64_load_seq_cst
-; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_load(ptr %a, i32 5), !dbg
 
-define i8* @atomic64_load_seq_cst_ptr_ty(i8** %a) nounwind uwtable {
+define ptr @atomic64_load_seq_cst_ptr_ty(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i8*, i8** %a seq_cst, align 8, !dbg !7
-  ret i8* %0, !dbg !7
+  %0 = load atomic ptr, ptr %a seq_cst, align 8, !dbg !7
+  ret ptr %0, !dbg !7
 }
 ; CHECK-LABEL: atomic64_load_seq_cst
-; CHECK: bitcast i8** %{{.+}} to i64*
-; CHECK-NEXT: call i64 @__tsan_atomic64_load(i64* %{{.+}}, i32 5), !dbg
-; CHECK-NEXT: inttoptr i64 %{{.+}} to i8*
+; CHECK: call i64 @__tsan_atomic64_load(ptr %a, i32 5), !dbg
+; CHECK-NEXT: inttoptr i64 %{{.+}} to ptr
 
-define void @atomic64_store_unordered(i64* %a) nounwind uwtable {
+define void @atomic64_store_unordered(ptr %a) nounwind uwtable {
 entry:
-  store atomic i64 0, i64* %a unordered, align 8, !dbg !7
+  store atomic i64 0, ptr %a unordered, align 8, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_store_unordered
-; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 0), !dbg
+; CHECK: call void @__tsan_atomic64_store(ptr %a, i64 0, i32 0), !dbg
 
-define void @atomic64_store_monotonic(i64* %a) nounwind uwtable {
+define void @atomic64_store_monotonic(ptr %a) nounwind uwtable {
 entry:
-  store atomic i64 0, i64* %a monotonic, align 8, !dbg !7
+  store atomic i64 0, ptr %a monotonic, align 8, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_store_monotonic
-; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 0), !dbg
+; CHECK: call void @__tsan_atomic64_store(ptr %a, i64 0, i32 0), !dbg
 
-define void @atomic64_store_release(i64* %a) nounwind uwtable {
+define void @atomic64_store_release(ptr %a) nounwind uwtable {
 entry:
-  store atomic i64 0, i64* %a release, align 8, !dbg !7
+  store atomic i64 0, ptr %a release, align 8, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_store_release
-; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 3), !dbg
+; CHECK: call void @__tsan_atomic64_store(ptr %a, i64 0, i32 3), !dbg
 
-define void @atomic64_store_seq_cst(i64* %a) nounwind uwtable {
+define void @atomic64_store_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  store atomic i64 0, i64* %a seq_cst, align 8, !dbg !7
+  store atomic i64 0, ptr %a seq_cst, align 8, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_store_seq_cst
-; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 5), !dbg
+; CHECK: call void @__tsan_atomic64_store(ptr %a, i64 0, i32 5), !dbg
 
-define void @atomic64_store_seq_cst_ptr_ty(i8** %a, i8* %v) nounwind uwtable {
+define void @atomic64_store_seq_cst_ptr_ty(ptr %a, ptr %v) nounwind uwtable {
 entry:
-  store atomic i8* %v, i8** %a seq_cst, align 8, !dbg !7
+  store atomic ptr %v, ptr %a seq_cst, align 8, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_store_seq_cst
-; CHECK: %{{.*}} = bitcast i8** %{{.*}} to i64*
-; CHECK-NEXT: %{{.*}} = ptrtoint i8* %{{.*}} to i64
-; CHECK-NEXT: call void @__tsan_atomic64_store(i64* %{{.*}}, i64 %{{.*}}, i32 5), !dbg
-
-define void @atomic64_xchg_monotonic(i64* %a) nounwind uwtable {
+; CHECK: call void @__tsan_atomic64_store(ptr %a, i64 %{{.*}}, i32 5), !dbg
+
+define void @atomic64_xchg_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i64* %a, i64 0 monotonic, !dbg !7
+  atomicrmw xchg ptr %a, i64 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_xchg_monotonic
-; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_exchange(ptr %a, i64 0, i32 0), !dbg
 
-define void @atomic64_add_monotonic(i64* %a) nounwind uwtable {
+define void @atomic64_add_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i64* %a, i64 0 monotonic, !dbg !7
+  atomicrmw add ptr %a, i64 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_add_monotonic
-; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_add(ptr %a, i64 0, i32 0), !dbg
 
-define void @atomic64_sub_monotonic(i64* %a) nounwind uwtable {
+define void @atomic64_sub_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i64* %a, i64 0 monotonic, !dbg !7
+  atomicrmw sub ptr %a, i64 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_sub_monotonic
-; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_sub(ptr %a, i64 0, i32 0), !dbg
 
-define void @atomic64_and_monotonic(i64* %a) nounwind uwtable {
+define void @atomic64_and_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i64* %a, i64 0 monotonic, !dbg !7
+  atomicrmw and ptr %a, i64 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_and_monotonic
-; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_and(ptr %a, i64 0, i32 0), !dbg
 
-define void @atomic64_or_monotonic(i64* %a) nounwind uwtable {
+define void @atomic64_or_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i64* %a, i64 0 monotonic, !dbg !7
+  atomicrmw or ptr %a, i64 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_or_monotonic
-; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_or(ptr %a, i64 0, i32 0), !dbg
 
-define void @atomic64_xor_monotonic(i64* %a) nounwind uwtable {
+define void @atomic64_xor_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i64* %a, i64 0 monotonic, !dbg !7
+  atomicrmw xor ptr %a, i64 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_xor_monotonic
-; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_xor(ptr %a, i64 0, i32 0), !dbg
 
-define void @atomic64_nand_monotonic(i64* %a) nounwind uwtable {
+define void @atomic64_nand_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i64* %a, i64 0 monotonic, !dbg !7
+  atomicrmw nand ptr %a, i64 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_nand_monotonic
-; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(ptr %a, i64 0, i32 0), !dbg
 
-define void @atomic64_xchg_acquire(i64* %a) nounwind uwtable {
+define void @atomic64_xchg_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i64* %a, i64 0 acquire, !dbg !7
+  atomicrmw xchg ptr %a, i64 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_xchg_acquire
-; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_exchange(ptr %a, i64 0, i32 2), !dbg
 
-define void @atomic64_add_acquire(i64* %a) nounwind uwtable {
+define void @atomic64_add_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i64* %a, i64 0 acquire, !dbg !7
+  atomicrmw add ptr %a, i64 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_add_acquire
-; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_add(ptr %a, i64 0, i32 2), !dbg
 
-define void @atomic64_sub_acquire(i64* %a) nounwind uwtable {
+define void @atomic64_sub_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i64* %a, i64 0 acquire, !dbg !7
+  atomicrmw sub ptr %a, i64 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_sub_acquire
-; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_sub(ptr %a, i64 0, i32 2), !dbg
 
-define void @atomic64_and_acquire(i64* %a) nounwind uwtable {
+define void @atomic64_and_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i64* %a, i64 0 acquire, !dbg !7
+  atomicrmw and ptr %a, i64 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_and_acquire
-; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_and(ptr %a, i64 0, i32 2), !dbg
 
-define void @atomic64_or_acquire(i64* %a) nounwind uwtable {
+define void @atomic64_or_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i64* %a, i64 0 acquire, !dbg !7
+  atomicrmw or ptr %a, i64 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_or_acquire
-; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_or(ptr %a, i64 0, i32 2), !dbg
 
-define void @atomic64_xor_acquire(i64* %a) nounwind uwtable {
+define void @atomic64_xor_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i64* %a, i64 0 acquire, !dbg !7
+  atomicrmw xor ptr %a, i64 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_xor_acquire
-; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_xor(ptr %a, i64 0, i32 2), !dbg
 
-define void @atomic64_nand_acquire(i64* %a) nounwind uwtable {
+define void @atomic64_nand_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i64* %a, i64 0 acquire, !dbg !7
+  atomicrmw nand ptr %a, i64 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_nand_acquire
-; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(ptr %a, i64 0, i32 2), !dbg
 
-define void @atomic64_xchg_release(i64* %a) nounwind uwtable {
+define void @atomic64_xchg_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i64* %a, i64 0 release, !dbg !7
+  atomicrmw xchg ptr %a, i64 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_xchg_release
-; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 3), !dbg
+; CHECK: call i64 @__tsan_atomic64_exchange(ptr %a, i64 0, i32 3), !dbg
 
-define void @atomic64_add_release(i64* %a) nounwind uwtable {
+define void @atomic64_add_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i64* %a, i64 0 release, !dbg !7
+  atomicrmw add ptr %a, i64 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_add_release
-; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 3), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_add(ptr %a, i64 0, i32 3), !dbg
 
-define void @atomic64_sub_release(i64* %a) nounwind uwtable {
+define void @atomic64_sub_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i64* %a, i64 0 release, !dbg !7
+  atomicrmw sub ptr %a, i64 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_sub_release
-; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 3), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_sub(ptr %a, i64 0, i32 3), !dbg
 
-define void @atomic64_and_release(i64* %a) nounwind uwtable {
+define void @atomic64_and_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i64* %a, i64 0 release, !dbg !7
+  atomicrmw and ptr %a, i64 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_and_release
-; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 3), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_and(ptr %a, i64 0, i32 3), !dbg
 
-define void @atomic64_or_release(i64* %a) nounwind uwtable {
+define void @atomic64_or_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i64* %a, i64 0 release, !dbg !7
+  atomicrmw or ptr %a, i64 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_or_release
-; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 3), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_or(ptr %a, i64 0, i32 3), !dbg
 
-define void @atomic64_xor_release(i64* %a) nounwind uwtable {
+define void @atomic64_xor_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i64* %a, i64 0 release, !dbg !7
+  atomicrmw xor ptr %a, i64 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_xor_release
-; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 3), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_xor(ptr %a, i64 0, i32 3), !dbg
 
-define void @atomic64_nand_release(i64* %a) nounwind uwtable {
+define void @atomic64_nand_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i64* %a, i64 0 release, !dbg !7
+  atomicrmw nand ptr %a, i64 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_nand_release
-; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 3), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(ptr %a, i64 0, i32 3), !dbg
 
-define void @atomic64_xchg_acq_rel(i64* %a) nounwind uwtable {
+define void @atomic64_xchg_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i64* %a, i64 0 acq_rel, !dbg !7
+  atomicrmw xchg ptr %a, i64 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_xchg_acq_rel
-; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 4), !dbg
+; CHECK: call i64 @__tsan_atomic64_exchange(ptr %a, i64 0, i32 4), !dbg
 
-define void @atomic64_add_acq_rel(i64* %a) nounwind uwtable {
+define void @atomic64_add_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i64* %a, i64 0 acq_rel, !dbg !7
+  atomicrmw add ptr %a, i64 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_add_acq_rel
-; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 4), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_add(ptr %a, i64 0, i32 4), !dbg
 
-define void @atomic64_sub_acq_rel(i64* %a) nounwind uwtable {
+define void @atomic64_sub_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i64* %a, i64 0 acq_rel, !dbg !7
+  atomicrmw sub ptr %a, i64 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_sub_acq_rel
-; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 4), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_sub(ptr %a, i64 0, i32 4), !dbg
 
-define void @atomic64_and_acq_rel(i64* %a) nounwind uwtable {
+define void @atomic64_and_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i64* %a, i64 0 acq_rel, !dbg !7
+  atomicrmw and ptr %a, i64 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_and_acq_rel
-; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 4), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_and(ptr %a, i64 0, i32 4), !dbg
 
-define void @atomic64_or_acq_rel(i64* %a) nounwind uwtable {
+define void @atomic64_or_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i64* %a, i64 0 acq_rel, !dbg !7
+  atomicrmw or ptr %a, i64 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_or_acq_rel
-; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 4), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_or(ptr %a, i64 0, i32 4), !dbg
 
-define void @atomic64_xor_acq_rel(i64* %a) nounwind uwtable {
+define void @atomic64_xor_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i64* %a, i64 0 acq_rel, !dbg !7
+  atomicrmw xor ptr %a, i64 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_xor_acq_rel
-; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 4), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_xor(ptr %a, i64 0, i32 4), !dbg
 
-define void @atomic64_nand_acq_rel(i64* %a) nounwind uwtable {
+define void @atomic64_nand_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i64* %a, i64 0 acq_rel, !dbg !7
+  atomicrmw nand ptr %a, i64 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_nand_acq_rel
-; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 4), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(ptr %a, i64 0, i32 4), !dbg
 
-define void @atomic64_xchg_seq_cst(i64* %a) nounwind uwtable {
+define void @atomic64_xchg_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i64* %a, i64 0 seq_cst, !dbg !7
+  atomicrmw xchg ptr %a, i64 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_xchg_seq_cst
-; CHECK: call i64 @__tsan_atomic64_exchange(i64* %a, i64 0, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_exchange(ptr %a, i64 0, i32 5), !dbg
 
-define void @atomic64_add_seq_cst(i64* %a) nounwind uwtable {
+define void @atomic64_add_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i64* %a, i64 0 seq_cst, !dbg !7
+  atomicrmw add ptr %a, i64 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_add_seq_cst
-; CHECK: call i64 @__tsan_atomic64_fetch_add(i64* %a, i64 0, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_add(ptr %a, i64 0, i32 5), !dbg
 
-define void @atomic64_sub_seq_cst(i64* %a) nounwind uwtable {
+define void @atomic64_sub_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i64* %a, i64 0 seq_cst, !dbg !7
+  atomicrmw sub ptr %a, i64 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_sub_seq_cst
-; CHECK: call i64 @__tsan_atomic64_fetch_sub(i64* %a, i64 0, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_sub(ptr %a, i64 0, i32 5), !dbg
 
-define void @atomic64_and_seq_cst(i64* %a) nounwind uwtable {
+define void @atomic64_and_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i64* %a, i64 0 seq_cst, !dbg !7
+  atomicrmw and ptr %a, i64 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_and_seq_cst
-; CHECK: call i64 @__tsan_atomic64_fetch_and(i64* %a, i64 0, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_and(ptr %a, i64 0, i32 5), !dbg
 
-define void @atomic64_or_seq_cst(i64* %a) nounwind uwtable {
+define void @atomic64_or_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i64* %a, i64 0 seq_cst, !dbg !7
+  atomicrmw or ptr %a, i64 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_or_seq_cst
-; CHECK: call i64 @__tsan_atomic64_fetch_or(i64* %a, i64 0, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_or(ptr %a, i64 0, i32 5), !dbg
 
-define void @atomic64_xor_seq_cst(i64* %a) nounwind uwtable {
+define void @atomic64_xor_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i64* %a, i64 0 seq_cst, !dbg !7
+  atomicrmw xor ptr %a, i64 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_xor_seq_cst
-; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_xor(ptr %a, i64 0, i32 5), !dbg
 
-define void @atomic64_nand_seq_cst(i64* %a) nounwind uwtable {
+define void @atomic64_nand_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i64* %a, i64 0 seq_cst, !dbg !7
+  atomicrmw nand ptr %a, i64 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_nand_seq_cst
-; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(ptr %a, i64 0, i32 5), !dbg
 
-define void @atomic64_cas_monotonic(i64* %a) nounwind uwtable {
+define void @atomic64_cas_monotonic(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i64* %a, i64 0, i64 1 monotonic monotonic, !dbg !7
-  cmpxchg i64* %a, i64 0, i64 1 monotonic acquire, !dbg !7
-  cmpxchg i64* %a, i64 0, i64 1 monotonic seq_cst, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 monotonic monotonic, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 monotonic acquire, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 monotonic seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_cas_monotonic
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 0, i32 0), !dbg
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 0, i32 2), !dbg
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 0, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 0, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 0, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 0, i32 5), !dbg
 
-define void @atomic64_cas_acquire(i64* %a) nounwind uwtable {
+define void @atomic64_cas_acquire(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i64* %a, i64 0, i64 1 acquire monotonic, !dbg !7
-  cmpxchg i64* %a, i64 0, i64 1 acquire acquire, !dbg !7
-  cmpxchg i64* %a, i64 0, i64 1 acquire seq_cst, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 acquire monotonic, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 acquire acquire, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 acquire seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_cas_acquire
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 2, i32 0), !dbg
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 2, i32 2), !dbg
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 2, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 2, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 2, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 2, i32 5), !dbg
 
-define void @atomic64_cas_release(i64* %a) nounwind uwtable {
+define void @atomic64_cas_release(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i64* %a, i64 0, i64 1 release monotonic, !dbg !7
-  cmpxchg i64* %a, i64 0, i64 1 release acquire, !dbg !7
-  cmpxchg i64* %a, i64 0, i64 1 release seq_cst, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 release monotonic, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 release acquire, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 release seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_cas_release
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 3, i32 0), !dbg
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 3, i32 2), !dbg
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 3, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 3, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 3, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 3, i32 5), !dbg
 
-define void @atomic64_cas_acq_rel(i64* %a) nounwind uwtable {
+define void @atomic64_cas_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i64* %a, i64 0, i64 1 acq_rel monotonic, !dbg !7
-  cmpxchg i64* %a, i64 0, i64 1 acq_rel acquire, !dbg !7
-  cmpxchg i64* %a, i64 0, i64 1 acq_rel seq_cst, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 acq_rel monotonic, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 acq_rel acquire, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 acq_rel seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_cas_acq_rel
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 4, i32 0), !dbg
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 4, i32 2), !dbg
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 4, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 4, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 4, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 4, i32 5), !dbg
 
-define void @atomic64_cas_seq_cst(i64* %a) nounwind uwtable {
+define void @atomic64_cas_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i64* %a, i64 0, i64 1 seq_cst monotonic, !dbg !7
-  cmpxchg i64* %a, i64 0, i64 1 seq_cst acquire, !dbg !7
-  cmpxchg i64* %a, i64 0, i64 1 seq_cst seq_cst, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 seq_cst monotonic, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 seq_cst acquire, !dbg !7
+  cmpxchg ptr %a, i64 0, i64 1 seq_cst seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_cas_seq_cst
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5, i32 0), !dbg
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5, i32 2), !dbg
-; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5, i32 5), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 5, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 5, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(ptr %a, i64 0, i64 1, i32 5, i32 5), !dbg
 
-define void @atomic64_cas_seq_cst_ptr_ty(i8** %a, i8* %v1, i8* %v2) nounwind uwtable {
+define void @atomic64_cas_seq_cst_ptr_ty(ptr %a, ptr %v1, ptr %v2) nounwind uwtable {
 entry:
-  cmpxchg i8** %a, i8* %v1, i8* %v2 seq_cst seq_cst, !dbg !7
+  cmpxchg ptr %a, ptr %v1, ptr %v2 seq_cst seq_cst, !dbg !7
   ret void
 }
 ; CHECK-LABEL: atomic64_cas_seq_cst
-; CHECK: {{.*}} = ptrtoint i8* %v1 to i64
-; CHECK-NEXT: {{.*}} = ptrtoint i8* %v2 to i64
-; CHECK-NEXT: {{.*}} = bitcast i8** %a to i64*
-; CHECK-NEXT: {{.*}} = call i64 @__tsan_atomic64_compare_exchange_val(i64* {{.*}}, i64 {{.*}}, i64 {{.*}}, i32 5, i32 5), !dbg
+; CHECK: {{.*}} = ptrtoint ptr %v1 to i64
+; CHECK-NEXT: {{.*}} = ptrtoint ptr %v2 to i64
+; CHECK-NEXT: {{.*}} = call i64 @__tsan_atomic64_compare_exchange_val(ptr {{.*}}, i64 {{.*}}, i64 {{.*}}, i32 5, i32 5), !dbg
 ; CHECK-NEXT: {{.*}} = icmp eq i64
-; CHECK-NEXT: {{.*}} = inttoptr i64 {{.*}} to i8*
-; CHECK-NEXT: {{.*}} = insertvalue { i8*, i1 } undef, i8* {{.*}}, 0
-; CHECK-NEXT: {{.*}} = insertvalue { i8*, i1 } {{.*}}, i1 {{.*}}, 1
+; CHECK-NEXT: {{.*}} = inttoptr i64 {{.*}} to ptr
+; CHECK-NEXT: {{.*}} = insertvalue { ptr, i1 } undef, ptr {{.*}}, 0
+; CHECK-NEXT: {{.*}} = insertvalue { ptr, i1 } {{.*}}, i1 {{.*}}, 1
 
-define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
+define i128 @atomic128_load_unordered(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i128, i128* %a unordered, align 16, !dbg !7
+  %0 = load atomic i128, ptr %a unordered, align 16, !dbg !7
   ret i128 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic128_load_unordered
-; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 0), !dbg
+; CHECK: call i128 @__tsan_atomic128_load(ptr %a, i32 0), !dbg
 
-define i128 @atomic128_load_monotonic(i128* %a) nounwind uwtable {
+define i128 @atomic128_load_monotonic(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i128, i128* %a monotonic, align 16, !dbg !7
+  %0 = load atomic i128, ptr %a monotonic, align 16, !dbg !7
   ret i128 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic128_load_monotonic
-; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 0), !dbg
+; CHECK: call i128 @__tsan_atomic128_load(ptr %a, i32 0), !dbg
 
-define i128 @atomic128_load_acquire(i128* %a) nounwind uwtable {
+define i128 @atomic128_load_acquire(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i128, i128* %a acquire, align 16, !dbg !7
+  %0 = load atomic i128, ptr %a acquire, align 16, !dbg !7
   ret i128 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic128_load_acquire
-; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 2), !dbg
+; CHECK: call i128 @__tsan_atomic128_load(ptr %a, i32 2), !dbg
 
-define i128 @atomic128_load_seq_cst(i128* %a) nounwind uwtable {
+define i128 @atomic128_load_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  %0 = load atomic i128, i128* %a seq_cst, align 16, !dbg !7
+  %0 = load atomic i128, ptr %a seq_cst, align 16, !dbg !7
   ret i128 %0, !dbg !7
 }
 ; CHECK-LABEL: atomic128_load_seq_cst
-; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 5), !dbg
+; CHECK: call i128 @__tsan_atomic128_load(ptr %a, i32 5), !dbg
 
-define void @atomic128_store_unordered(i128* %a) nounwind uwtable {
+define void @atomic128_store_unordered(ptr %a) nounwind uwtable {
 entry:
-  store atomic i128 0, i128* %a unordered, align 16, !dbg !7
+  store atomic i128 0, ptr %a unordered, align 16, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_store_unordered
-; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 0), !dbg
+; CHECK: call void @__tsan_atomic128_store(ptr %a, i128 0, i32 0), !dbg
 
-define void @atomic128_store_monotonic(i128* %a) nounwind uwtable {
+define void @atomic128_store_monotonic(ptr %a) nounwind uwtable {
 entry:
-  store atomic i128 0, i128* %a monotonic, align 16, !dbg !7
+  store atomic i128 0, ptr %a monotonic, align 16, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_store_monotonic
-; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 0), !dbg
+; CHECK: call void @__tsan_atomic128_store(ptr %a, i128 0, i32 0), !dbg
 
-define void @atomic128_store_release(i128* %a) nounwind uwtable {
+define void @atomic128_store_release(ptr %a) nounwind uwtable {
 entry:
-  store atomic i128 0, i128* %a release, align 16, !dbg !7
+  store atomic i128 0, ptr %a release, align 16, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_store_release
-; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 3), !dbg
+; CHECK: call void @__tsan_atomic128_store(ptr %a, i128 0, i32 3), !dbg
 
-define void @atomic128_store_seq_cst(i128* %a) nounwind uwtable {
+define void @atomic128_store_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  store atomic i128 0, i128* %a seq_cst, align 16, !dbg !7
+  store atomic i128 0, ptr %a seq_cst, align 16, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_store_seq_cst
-; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 5), !dbg
+; CHECK: call void @__tsan_atomic128_store(ptr %a, i128 0, i32 5), !dbg
 
-define void @atomic128_xchg_monotonic(i128* %a) nounwind uwtable {
+define void @atomic128_xchg_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i128* %a, i128 0 monotonic, !dbg !7
+  atomicrmw xchg ptr %a, i128 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_xchg_monotonic
-; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 0), !dbg
+; CHECK: call i128 @__tsan_atomic128_exchange(ptr %a, i128 0, i32 0), !dbg
 
-define void @atomic128_add_monotonic(i128* %a) nounwind uwtable {
+define void @atomic128_add_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i128* %a, i128 0 monotonic, !dbg !7
+  atomicrmw add ptr %a, i128 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_add_monotonic
-; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 0), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_add(ptr %a, i128 0, i32 0), !dbg
 
-define void @atomic128_sub_monotonic(i128* %a) nounwind uwtable {
+define void @atomic128_sub_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i128* %a, i128 0 monotonic, !dbg !7
+  atomicrmw sub ptr %a, i128 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_sub_monotonic
-; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 0), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_sub(ptr %a, i128 0, i32 0), !dbg
 
-define void @atomic128_and_monotonic(i128* %a) nounwind uwtable {
+define void @atomic128_and_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i128* %a, i128 0 monotonic, !dbg !7
+  atomicrmw and ptr %a, i128 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_and_monotonic
-; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 0), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_and(ptr %a, i128 0, i32 0), !dbg
 
-define void @atomic128_or_monotonic(i128* %a) nounwind uwtable {
+define void @atomic128_or_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i128* %a, i128 0 monotonic, !dbg !7
+  atomicrmw or ptr %a, i128 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_or_monotonic
-; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 0), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_or(ptr %a, i128 0, i32 0), !dbg
 
-define void @atomic128_xor_monotonic(i128* %a) nounwind uwtable {
+define void @atomic128_xor_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i128* %a, i128 0 monotonic, !dbg !7
+  atomicrmw xor ptr %a, i128 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_xor_monotonic
-; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 0), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_xor(ptr %a, i128 0, i32 0), !dbg
 
-define void @atomic128_nand_monotonic(i128* %a) nounwind uwtable {
+define void @atomic128_nand_monotonic(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i128* %a, i128 0 monotonic, !dbg !7
+  atomicrmw nand ptr %a, i128 0 monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_nand_monotonic
-; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 0), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(ptr %a, i128 0, i32 0), !dbg
 
-define void @atomic128_xchg_acquire(i128* %a) nounwind uwtable {
+define void @atomic128_xchg_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i128* %a, i128 0 acquire, !dbg !7
+  atomicrmw xchg ptr %a, i128 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_xchg_acquire
-; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 2), !dbg
+; CHECK: call i128 @__tsan_atomic128_exchange(ptr %a, i128 0, i32 2), !dbg
 
-define void @atomic128_add_acquire(i128* %a) nounwind uwtable {
+define void @atomic128_add_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i128* %a, i128 0 acquire, !dbg !7
+  atomicrmw add ptr %a, i128 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_add_acquire
-; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 2), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_add(ptr %a, i128 0, i32 2), !dbg
 
-define void @atomic128_sub_acquire(i128* %a) nounwind uwtable {
+define void @atomic128_sub_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i128* %a, i128 0 acquire, !dbg !7
+  atomicrmw sub ptr %a, i128 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_sub_acquire
-; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 2), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_sub(ptr %a, i128 0, i32 2), !dbg
 
-define void @atomic128_and_acquire(i128* %a) nounwind uwtable {
+define void @atomic128_and_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i128* %a, i128 0 acquire, !dbg !7
+  atomicrmw and ptr %a, i128 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_and_acquire
-; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 2), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_and(ptr %a, i128 0, i32 2), !dbg
 
-define void @atomic128_or_acquire(i128* %a) nounwind uwtable {
+define void @atomic128_or_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i128* %a, i128 0 acquire, !dbg !7
+  atomicrmw or ptr %a, i128 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_or_acquire
-; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 2), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_or(ptr %a, i128 0, i32 2), !dbg
 
-define void @atomic128_xor_acquire(i128* %a) nounwind uwtable {
+define void @atomic128_xor_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i128* %a, i128 0 acquire, !dbg !7
+  atomicrmw xor ptr %a, i128 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_xor_acquire
-; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 2), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_xor(ptr %a, i128 0, i32 2), !dbg
 
-define void @atomic128_nand_acquire(i128* %a) nounwind uwtable {
+define void @atomic128_nand_acquire(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i128* %a, i128 0 acquire, !dbg !7
+  atomicrmw nand ptr %a, i128 0 acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_nand_acquire
-; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 2), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(ptr %a, i128 0, i32 2), !dbg
 
-define void @atomic128_xchg_release(i128* %a) nounwind uwtable {
+define void @atomic128_xchg_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i128* %a, i128 0 release, !dbg !7
+  atomicrmw xchg ptr %a, i128 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_xchg_release
-; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 3), !dbg
+; CHECK: call i128 @__tsan_atomic128_exchange(ptr %a, i128 0, i32 3), !dbg
 
-define void @atomic128_add_release(i128* %a) nounwind uwtable {
+define void @atomic128_add_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i128* %a, i128 0 release, !dbg !7
+  atomicrmw add ptr %a, i128 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_add_release
-; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 3), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_add(ptr %a, i128 0, i32 3), !dbg
 
-define void @atomic128_sub_release(i128* %a) nounwind uwtable {
+define void @atomic128_sub_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i128* %a, i128 0 release, !dbg !7
+  atomicrmw sub ptr %a, i128 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_sub_release
-; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 3), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_sub(ptr %a, i128 0, i32 3), !dbg
 
-define void @atomic128_and_release(i128* %a) nounwind uwtable {
+define void @atomic128_and_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i128* %a, i128 0 release, !dbg !7
+  atomicrmw and ptr %a, i128 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_and_release
-; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 3), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_and(ptr %a, i128 0, i32 3), !dbg
 
-define void @atomic128_or_release(i128* %a) nounwind uwtable {
+define void @atomic128_or_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i128* %a, i128 0 release, !dbg !7
+  atomicrmw or ptr %a, i128 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_or_release
-; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 3), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_or(ptr %a, i128 0, i32 3), !dbg
 
-define void @atomic128_xor_release(i128* %a) nounwind uwtable {
+define void @atomic128_xor_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i128* %a, i128 0 release, !dbg !7
+  atomicrmw xor ptr %a, i128 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_xor_release
-; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 3), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_xor(ptr %a, i128 0, i32 3), !dbg
 
-define void @atomic128_nand_release(i128* %a) nounwind uwtable {
+define void @atomic128_nand_release(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i128* %a, i128 0 release, !dbg !7
+  atomicrmw nand ptr %a, i128 0 release, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_nand_release
-; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 3), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(ptr %a, i128 0, i32 3), !dbg
 
-define void @atomic128_xchg_acq_rel(i128* %a) nounwind uwtable {
+define void @atomic128_xchg_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i128* %a, i128 0 acq_rel, !dbg !7
+  atomicrmw xchg ptr %a, i128 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_xchg_acq_rel
-; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 4), !dbg
+; CHECK: call i128 @__tsan_atomic128_exchange(ptr %a, i128 0, i32 4), !dbg
 
-define void @atomic128_add_acq_rel(i128* %a) nounwind uwtable {
+define void @atomic128_add_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i128* %a, i128 0 acq_rel, !dbg !7
+  atomicrmw add ptr %a, i128 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_add_acq_rel
-; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 4), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_add(ptr %a, i128 0, i32 4), !dbg
 
-define void @atomic128_sub_acq_rel(i128* %a) nounwind uwtable {
+define void @atomic128_sub_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i128* %a, i128 0 acq_rel, !dbg !7
+  atomicrmw sub ptr %a, i128 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_sub_acq_rel
-; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 4), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_sub(ptr %a, i128 0, i32 4), !dbg
 
-define void @atomic128_and_acq_rel(i128* %a) nounwind uwtable {
+define void @atomic128_and_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i128* %a, i128 0 acq_rel, !dbg !7
+  atomicrmw and ptr %a, i128 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_and_acq_rel
-; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 4), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_and(ptr %a, i128 0, i32 4), !dbg
 
-define void @atomic128_or_acq_rel(i128* %a) nounwind uwtable {
+define void @atomic128_or_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i128* %a, i128 0 acq_rel, !dbg !7
+  atomicrmw or ptr %a, i128 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_or_acq_rel
-; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 4), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_or(ptr %a, i128 0, i32 4), !dbg
 
-define void @atomic128_xor_acq_rel(i128* %a) nounwind uwtable {
+define void @atomic128_xor_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i128* %a, i128 0 acq_rel, !dbg !7
+  atomicrmw xor ptr %a, i128 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_xor_acq_rel
-; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 4), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_xor(ptr %a, i128 0, i32 4), !dbg
 
-define void @atomic128_nand_acq_rel(i128* %a) nounwind uwtable {
+define void @atomic128_nand_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i128* %a, i128 0 acq_rel, !dbg !7
+  atomicrmw nand ptr %a, i128 0 acq_rel, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_nand_acq_rel
-; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 4), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(ptr %a, i128 0, i32 4), !dbg
 
-define void @atomic128_xchg_seq_cst(i128* %a) nounwind uwtable {
+define void @atomic128_xchg_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xchg i128* %a, i128 0 seq_cst, !dbg !7
+  atomicrmw xchg ptr %a, i128 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_xchg_seq_cst
-; CHECK: call i128 @__tsan_atomic128_exchange(i128* %a, i128 0, i32 5), !dbg
+; CHECK: call i128 @__tsan_atomic128_exchange(ptr %a, i128 0, i32 5), !dbg
 
-define void @atomic128_add_seq_cst(i128* %a) nounwind uwtable {
+define void @atomic128_add_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw add i128* %a, i128 0 seq_cst, !dbg !7
+  atomicrmw add ptr %a, i128 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_add_seq_cst
-; CHECK: call i128 @__tsan_atomic128_fetch_add(i128* %a, i128 0, i32 5), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_add(ptr %a, i128 0, i32 5), !dbg
 
-define void @atomic128_sub_seq_cst(i128* %a) nounwind uwtable {
+define void @atomic128_sub_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw sub i128* %a, i128 0 seq_cst, !dbg !7
+  atomicrmw sub ptr %a, i128 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_sub_seq_cst
-; CHECK: call i128 @__tsan_atomic128_fetch_sub(i128* %a, i128 0, i32 5), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_sub(ptr %a, i128 0, i32 5), !dbg
 
-define void @atomic128_and_seq_cst(i128* %a) nounwind uwtable {
+define void @atomic128_and_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw and i128* %a, i128 0 seq_cst, !dbg !7
+  atomicrmw and ptr %a, i128 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_and_seq_cst
-; CHECK: call i128 @__tsan_atomic128_fetch_and(i128* %a, i128 0, i32 5), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_and(ptr %a, i128 0, i32 5), !dbg
 
-define void @atomic128_or_seq_cst(i128* %a) nounwind uwtable {
+define void @atomic128_or_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw or i128* %a, i128 0 seq_cst, !dbg !7
+  atomicrmw or ptr %a, i128 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_or_seq_cst
-; CHECK: call i128 @__tsan_atomic128_fetch_or(i128* %a, i128 0, i32 5), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_or(ptr %a, i128 0, i32 5), !dbg
 
-define void @atomic128_xor_seq_cst(i128* %a) nounwind uwtable {
+define void @atomic128_xor_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw xor i128* %a, i128 0 seq_cst, !dbg !7
+  atomicrmw xor ptr %a, i128 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_xor_seq_cst
-; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 5), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_xor(ptr %a, i128 0, i32 5), !dbg
 
-define void @atomic128_nand_seq_cst(i128* %a) nounwind uwtable {
+define void @atomic128_nand_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  atomicrmw nand i128* %a, i128 0 seq_cst, !dbg !7
+  atomicrmw nand ptr %a, i128 0 seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_nand_seq_cst
-; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 5), !dbg
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(ptr %a, i128 0, i32 5), !dbg
 
-define void @atomic128_cas_monotonic(i128* %a) nounwind uwtable {
+define void @atomic128_cas_monotonic(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i128* %a, i128 0, i128 1 monotonic monotonic, !dbg !7
+  cmpxchg ptr %a, i128 0, i128 1 monotonic monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_cas_monotonic
-; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 0, i32 0), !dbg
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(ptr %a, i128 0, i128 1, i32 0, i32 0), !dbg
 
-define void @atomic128_cas_acquire(i128* %a) nounwind uwtable {
+define void @atomic128_cas_acquire(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i128* %a, i128 0, i128 1 acquire acquire, !dbg !7
+  cmpxchg ptr %a, i128 0, i128 1 acquire acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_cas_acquire
-; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 2, i32 2), !dbg
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(ptr %a, i128 0, i128 1, i32 2, i32 2), !dbg
 
-define void @atomic128_cas_release(i128* %a) nounwind uwtable {
+define void @atomic128_cas_release(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i128* %a, i128 0, i128 1 release monotonic, !dbg !7
+  cmpxchg ptr %a, i128 0, i128 1 release monotonic, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_cas_release
-; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 3, i32 0), !dbg
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(ptr %a, i128 0, i128 1, i32 3, i32 0), !dbg
 
-define void @atomic128_cas_acq_rel(i128* %a) nounwind uwtable {
+define void @atomic128_cas_acq_rel(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i128* %a, i128 0, i128 1 acq_rel acquire, !dbg !7
+  cmpxchg ptr %a, i128 0, i128 1 acq_rel acquire, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_cas_acq_rel
-; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 4, i32 2), !dbg
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(ptr %a, i128 0, i128 1, i32 4, i32 2), !dbg
 
-define void @atomic128_cas_seq_cst(i128* %a) nounwind uwtable {
+define void @atomic128_cas_seq_cst(ptr %a) nounwind uwtable {
 entry:
-  cmpxchg i128* %a, i128 0, i128 1 seq_cst seq_cst, !dbg !7
+  cmpxchg ptr %a, i128 0, i128 1 seq_cst seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic128_cas_seq_cst
-; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(i128* %a, i128 0, i128 1, i32 5, i32 5), !dbg
+; CHECK: call i128 @__tsan_atomic128_compare_exchange_val(ptr %a, i128 0, i128 1, i32 5, i32 5), !dbg
 
 define void @atomic_signal_fence_acquire() nounwind uwtable {
 entry:

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/capture.ll b/llvm/test/Instrumentation/ThreadSanitizer/capture.ll
index 6f0359b4bb021..8edf310df9823 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/capture.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/capture.ll
@@ -2,16 +2,16 @@
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
-declare void @escape(i32*)
+declare void @escape(ptr)
 
- at sink = global i32* null, align 4
+ at sink = global ptr null, align 4
 
 define void @captured0() nounwind uwtable sanitize_thread {
 entry:
   %ptr = alloca i32, align 4
   ; escapes due to call
-  call void @escape(i32* %ptr)
-  store i32 42, i32* %ptr, align 4
+  call void @escape(ptr %ptr)
+  store i32 42, ptr %ptr, align 4
   ret void
 }
 ; CHECK-LABEL: define void @captured0
@@ -22,8 +22,8 @@ define void @captured1() nounwind uwtable sanitize_thread {
 entry:
   %ptr = alloca i32, align 4
   ; escapes due to store into global
-  store i32* %ptr, i32** @sink, align 8
-  store i32 42, i32* %ptr, align 4
+  store ptr %ptr, ptr @sink, align 8
+  store i32 42, ptr %ptr, align 4
   ret void
 }
 ; CHECK-LABEL: define void @captured1
@@ -34,12 +34,12 @@ entry:
 define void @captured2() nounwind uwtable sanitize_thread {
 entry:
   %ptr = alloca i32, align 4
-  %tmp = alloca i32*, align 8
+  %tmp = alloca ptr, align 8
   ; transitive escape
-  store i32* %ptr, i32** %tmp, align 8
-  %0 = load i32*, i32** %tmp, align 8
-  store i32* %0, i32** @sink, align 8
-  store i32 42, i32* %ptr, align 4
+  store ptr %ptr, ptr %tmp, align 8
+  %0 = load ptr, ptr %tmp, align 8
+  store ptr %0, ptr @sink, align 8
+  store i32 42, ptr %ptr, align 4
   ret void
 }
 ; CHECK-LABEL: define void @captured2
@@ -50,9 +50,9 @@ entry:
 define void @notcaptured0() nounwind uwtable sanitize_thread {
 entry:
   %ptr = alloca i32, align 4
-  store i32 42, i32* %ptr, align 4
+  store i32 42, ptr %ptr, align 4
   ; escapes due to call
-  call void @escape(i32* %ptr)
+  call void @escape(ptr %ptr)
   ret void
 }
 ; CHECK-LABEL: define void @notcaptured0
@@ -62,9 +62,9 @@ entry:
 define void @notcaptured1() nounwind uwtable sanitize_thread {
 entry:
   %ptr = alloca i32, align 4
-  store i32 42, i32* %ptr, align 4
+  store i32 42, ptr %ptr, align 4
   ; escapes due to store into global
-  store i32* %ptr, i32** @sink, align 8
+  store ptr %ptr, ptr @sink, align 8
   ret void
 }
 ; CHECK-LABEL: define void @notcaptured1
@@ -75,12 +75,12 @@ entry:
 define void @notcaptured2() nounwind uwtable sanitize_thread {
 entry:
   %ptr = alloca i32, align 4
-  %tmp = alloca i32*, align 8
-  store i32 42, i32* %ptr, align 4
+  %tmp = alloca ptr, align 8
+  store i32 42, ptr %ptr, align 4
   ; transitive escape
-  store i32* %ptr, i32** %tmp, align 8
-  %0 = load i32*, i32** %tmp, align 8
-  store i32* %0, i32** @sink, align 8
+  store ptr %ptr, ptr %tmp, align 8
+  %0 = load ptr, ptr %tmp, align 8
+  store ptr %0, ptr @sink, align 8
   ret void
 }
 ; CHECK-LABEL: define void @notcaptured2

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/debug_calls.ll b/llvm/test/Instrumentation/ThreadSanitizer/debug_calls.ll
index 2ea4cbfcb56bd..0f794bb954e9d 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/debug_calls.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/debug_calls.ll
@@ -2,12 +2,12 @@
 
 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 
-define void @Increment(i32* nocapture %0) local_unnamed_addr sanitize_thread !dbg !7 {
-  call void @llvm.dbg.value(metadata i32* %0, metadata !14, metadata !DIExpression()), !dbg !16
-  %2 = load i32, i32* %0, align 4, !dbg !17, !tbaa !18
+define void @Increment(ptr nocapture %0) local_unnamed_addr sanitize_thread !dbg !7 {
+  call void @llvm.dbg.value(metadata ptr %0, metadata !14, metadata !DIExpression()), !dbg !16
+  %2 = load i32, ptr %0, align 4, !dbg !17, !tbaa !18
   call void @llvm.dbg.value(metadata i32 %2, metadata !15, metadata !DIExpression()), !dbg !16
   %3 = add nsw i32 %2, 1, !dbg !22
-  store i32 %3, i32* %0, align 4, !dbg !23, !tbaa !18
+  store i32 %3, ptr %0, align 4, !dbg !23, !tbaa !18
   ret void, !dbg !24
 }
 ; CHECK-LABEL: define void @Increment

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/do-not-instrument-memory-access.ll b/llvm/test/Instrumentation/ThreadSanitizer/do-not-instrument-memory-access.ll
index 1ea5724a3c98e..c0efa250fd50a 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/do-not-instrument-memory-access.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/do-not-instrument-memory-access.ll
@@ -18,39 +18,39 @@ target triple = "x86_64-apple-macosx10.9"
 
 define i32 @test_gep() sanitize_thread {
 entry:
-  %pgocount = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_gep, i64 0, i64 0)
+  %pgocount = load i64, ptr @__profc_test_gep
   %0 = add i64 %pgocount, 1
-  store i64 %0, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_gep, i64 0, i64 0)
+  store i64 %0, ptr @__profc_test_gep
 
-  %gcovcount = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0)
+  %gcovcount = load i64, ptr @__llvm_gcov_ctr
   %1 = add i64 %gcovcount, 1
-  store i64 %1, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0)
+  store i64 %1, ptr @__llvm_gcov_ctr
 
-  %gcovcount.1 = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr.1, i64 0, i64 0)
+  %gcovcount.1 = load i64, ptr @__llvm_gcov_ctr.1
   %2 = add i64 %gcovcount.1, 1
-  store i64 %2, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr.1, i64 0, i64 0)
+  store i64 %2, ptr @__llvm_gcov_ctr.1
 
   ret i32 1
 }
 
 define i32 @test_bitcast() sanitize_thread {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* bitcast ([2 x i64]* @__profc_test_bitcast to <2 x i64>*), align 8
-  %.promoted5 = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_bitcast_foo, i64 0, i64 0), align 8
+  %0 = load <2 x i64>, ptr @__profc_test_bitcast, align 8
+  %.promoted5 = load i64, ptr @__profc_test_bitcast_foo, align 8
   %1 = add i64 %.promoted5, 10
   %2 = add <2 x i64> %0, <i64 1, i64 10>
-  store <2 x i64> %2, <2 x i64>* bitcast ([2 x i64]* @__profc_test_bitcast to <2 x i64>*), align 8
-  store i64 %1, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__profc_test_bitcast_foo, i64 0, i64 0), align 8
+  store <2 x i64> %2, ptr @__profc_test_bitcast, align 8
+  store i64 %1, ptr @__profc_test_bitcast_foo, align 8
   ret i32 undef
 }
 
 define void @test_load() sanitize_thread {
 entry:
-  %0 = load i32, i32* @__llvm_gcov_global_state_pred
-  store i32 1, i32* @__llvm_gcov_global_state_pred
+  %0 = load i32, ptr @__llvm_gcov_global_state_pred
+  store i32 1, ptr @__llvm_gcov_global_state_pred
 
-  %1 = load i32, i32* @__llvm_gcda_foo
-  store i32 1, i32* @__llvm_gcda_foo
+  %1 = load i32, ptr @__llvm_gcda_foo
+  store i32 1, ptr @__llvm_gcda_foo
 
   ret void
 }

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/eh.ll b/llvm/test/Instrumentation/ThreadSanitizer/eh.ll
index f9690b59ceb25..37a86559ab6e6 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/eh.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/eh.ll
@@ -35,13 +35,13 @@ define i32 @func2() sanitize_thread {
   ; CHECK: ret i32 0
 }
 
-define i32 @func3(i32* %p) sanitize_thread {
-  %a = load i32, i32* %p
+define i32 @func3(ptr %p) sanitize_thread {
+  %a = load i32, ptr %p
   ret i32 %a
-  ; CHECK: define i32 @func3(i32* %p)
+  ; CHECK: define i32 @func3(ptr %p)
   ; CHECK: call void @__tsan_func_entry
   ; CHECK: call void @__tsan_read4
-  ; CHECK: %a = load i32, i32* %p
+  ; CHECK: %a = load i32, ptr %p
   ; CHECK: call void @__tsan_func_exit()
   ; CHECK: ret i32 %a
 }

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/missing_dbg.ll b/llvm/test/Instrumentation/ThreadSanitizer/missing_dbg.ll
index 3909f5f19a84f..160856d586fa1 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/missing_dbg.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/missing_dbg.ll
@@ -2,29 +2,29 @@
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
-define i32 @with_dbg(i32* %a) sanitize_thread !dbg !3 {
+define i32 @with_dbg(ptr %a) sanitize_thread !dbg !3 {
 entry:
-  %tmp1 = load i32, i32* %a, align 4
+  %tmp1 = load i32, ptr %a, align 4
   ret i32 %tmp1
 }
 ; CHECK-LABEL: @with_dbg
 ; CHECK-NEXT:  entry:
-; CHECK:       call void @__tsan_func_entry(i8* %0), !dbg [[DBG:![0-9]+]]
-; CHECK:       call void @__tsan_read4(i8* %1), !dbg [[DBG]]
+; CHECK:       call void @__tsan_func_entry(ptr %0), !dbg [[DBG:![0-9]+]]
+; CHECK:       call void @__tsan_read4(ptr %a), !dbg [[DBG]]
 ; CHECK:       call void @__tsan_func_exit(), !dbg [[DBG]]
 
-define i32 @without_dbg(i32* %a) sanitize_thread {
+define i32 @without_dbg(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load i32, i32* %a, align 4
+  %tmp1 = load i32, ptr %a, align 4
   ret i32 %tmp1
 }
 ; CHECK-LABEL: @without_dbg
 ; CHECK-NEXT:  entry:
-; CHECK-NOT:   call void @__tsan_func_entry(i8* %0), !dbg
-; CHECK-NOT:   call void @__tsan_read4(i8* %1), !dbg
+; CHECK-NOT:   call void @__tsan_func_entry(ptr %0), !dbg
+; CHECK-NOT:   call void @__tsan_read4(ptr %1), !dbg
 ; CHECK-NOT:   call void @__tsan_func_exit(), !dbg
-; CHECK:       call void @__tsan_func_entry(i8* %0)
-; CHECK:       call void @__tsan_read4(i8* %1)
+; CHECK:       call void @__tsan_func_entry(ptr %0)
+; CHECK:       call void @__tsan_read4(ptr %a)
 ; CHECK:       call void @__tsan_func_exit()
 
 !llvm.dbg.cu = !{!0}

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/no_sanitize_thread.ll b/llvm/test/Instrumentation/ThreadSanitizer/no_sanitize_thread.ll
index 290167da6dea5..965704c24bd8a 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/no_sanitize_thread.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/no_sanitize_thread.ll
@@ -4,31 +4,31 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "x86_64-unknown-linux-gnu"
 
 ; no sanitize_thread attribute here
-define i32 @read_4_bytes(i32* %a) {
+define i32 @read_4_bytes(ptr %a) {
 entry:
-  %tmp1 = load i32, i32* %a, align 4
+  %tmp1 = load i32, ptr %a, align 4
   ret i32 %tmp1
 }
 
-; CHECK: define i32 @read_4_bytes(i32* %a) {
+; CHECK: define i32 @read_4_bytes(ptr %a) {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   %tmp1 = load i32, i32* %a, align 4
+; CHECK-NEXT:   %tmp1 = load i32, ptr %a, align 4
 ; CHECK: ret i32 %tmp1
 
 ; no sanitize_thread attribute here
-define i32 @read_4_bytes_and_call(i32* %a) {
+define i32 @read_4_bytes_and_call(ptr %a) {
 entry:
   call void @foo()
-  %tmp1 = load i32, i32* %a, align 4
+  %tmp1 = load i32, ptr %a, align 4
   ret i32 %tmp1
 }
 
-; CHECK: define i32 @read_4_bytes_and_call(i32* %a) {
+; CHECK: define i32 @read_4_bytes_and_call(ptr %a) {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   %0 = call i8* @llvm.returnaddress(i32 0)
-; CHECK-NEXT:   call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT:   %0 = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:   call void @__tsan_func_entry(ptr %0)
 ; CHECK-NEXT:   call void @foo()
-; CHECK-NEXT:   %tmp1 = load i32, i32* %a, align 4
+; CHECK-NEXT:   %tmp1 = load i32, ptr %a, align 4
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK-NEXT:   ret i32 %tmp1
 

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/read_before_write.ll b/llvm/test/Instrumentation/ThreadSanitizer/read_before_write.ll
index 0b4021f9e7b7b..1ef283d8f8f4c 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/read_before_write.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/read_before_write.ll
@@ -5,11 +5,11 @@
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
-define void @IncrementMe(i32* nocapture %ptr) nounwind uwtable sanitize_thread {
+define void @IncrementMe(ptr nocapture %ptr) nounwind uwtable sanitize_thread {
 entry:
-  %0 = load i32, i32* %ptr, align 4
+  %0 = load i32, ptr %ptr, align 4
   %inc = add nsw i32 %0, 1
-  store i32 %inc, i32* %ptr, align 4
+  store i32 %inc, ptr %ptr, align 4
   ret void
 }
 ; CHECK-LABEL: define void @IncrementMe
@@ -21,12 +21,12 @@ entry:
 ; CHECK-COMPOUND: __tsan_read_write4
 ; CHECK: ret void
 
-define void @IncrementMeWithCallInBetween(i32* nocapture %ptr) nounwind uwtable sanitize_thread {
+define void @IncrementMeWithCallInBetween(ptr nocapture %ptr) nounwind uwtable sanitize_thread {
 entry:
-  %0 = load i32, i32* %ptr, align 4
+  %0 = load i32, ptr %ptr, align 4
   %inc = add nsw i32 %0, 1
   call void @foo()
-  store i32 %inc, i32* %ptr, align 4
+  store i32 %inc, ptr %ptr, align 4
   ret void
 }
 
@@ -37,11 +37,11 @@ entry:
 
 declare void @foo()
 
-define void @VolatileLoad(i32* nocapture %ptr) nounwind uwtable sanitize_thread {
+define void @VolatileLoad(ptr nocapture %ptr) nounwind uwtable sanitize_thread {
 entry:
-  %0 = load volatile i32, i32* %ptr, align 4
+  %0 = load volatile i32, ptr %ptr, align 4
   %inc = add nsw i32 %0, 1
-  store i32 %inc, i32* %ptr, align 4
+  store i32 %inc, ptr %ptr, align 4
   ret void
 }
 ; CHECK-LABEL: define void @VolatileLoad
@@ -51,11 +51,11 @@ entry:
 ; CHECK-COMPOUND-VOLATILE: __tsan_write4
 ; CHECK: ret void
 
-define void @VolatileStore(i32* nocapture %ptr) nounwind uwtable sanitize_thread {
+define void @VolatileStore(ptr nocapture %ptr) nounwind uwtable sanitize_thread {
 entry:
-  %0 = load i32, i32* %ptr, align 4
+  %0 = load i32, ptr %ptr, align 4
   %inc = add nsw i32 %0, 1
-  store volatile i32 %inc, i32* %ptr, align 4
+  store volatile i32 %inc, ptr %ptr, align 4
   ret void
 }
 ; CHECK-LABEL: define void @VolatileStore
@@ -65,11 +65,11 @@ entry:
 ; CHECK-COMPOUND-VOLATILE: __tsan_volatile_write4
 ; CHECK: ret void
 
-define void @VolatileBoth(i32* nocapture %ptr) nounwind uwtable sanitize_thread {
+define void @VolatileBoth(ptr nocapture %ptr) nounwind uwtable sanitize_thread {
 entry:
-  %0 = load volatile i32, i32* %ptr, align 4
+  %0 = load volatile i32, ptr %ptr, align 4
   %inc = add nsw i32 %0, 1
-  store volatile i32 %inc, i32* %ptr, align 4
+  store volatile i32 %inc, ptr %ptr, align 4
   ret void
 }
 ; CHECK-LABEL: define void @VolatileBoth

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/read_from_global.ll b/llvm/test/Instrumentation/ThreadSanitizer/read_from_global.ll
index fffc626db215c..53a6b32f1f87a 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/read_from_global.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/read_from_global.ll
@@ -6,7 +6,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 @const_global = external constant i32
 define i32 @read_from_const_global() nounwind uwtable sanitize_thread readnone {
 entry:
-  %0 = load i32, i32* @const_global, align 4
+  %0 = load i32, ptr @const_global, align 4
   ret i32 %0
 }
 ; CHECK: define i32 @read_from_const_global
@@ -16,7 +16,7 @@ entry:
 @non_const_global = global i32 0, align 4
 define i32 @read_from_non_const_global() nounwind uwtable sanitize_thread readonly {
 entry:
-  %0 = load i32, i32* @non_const_global, align 4
+  %0 = load i32, ptr @non_const_global, align 4
   ret i32 %0
 }
 
@@ -28,8 +28,8 @@ entry:
 define i32 @read_from_const_global_array(i32 %idx) nounwind uwtable sanitize_thread readnone {
 entry:
   %idxprom = sext i32 %idx to i64
-  %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @const_global_array, i64 0, i64 %idxprom
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [10 x i32], ptr @const_global_array, i64 0, i64 %idxprom
+  %0 = load i32, ptr %arrayidx, align 4
   ret i32 %0
 }
 
@@ -37,13 +37,12 @@ entry:
 ; CHECK-NOT: __tsan
 ; CHECK: ret i32
 
-%struct.Foo = type { i32 (...)** }
-define void @call_virtual_func(%struct.Foo* %f) uwtable sanitize_thread {
+%struct.Foo = type { ptr }
+define void @call_virtual_func(ptr %f) uwtable sanitize_thread {
 entry:
-  %0 = bitcast %struct.Foo* %f to void (%struct.Foo*)***
-  %vtable = load void (%struct.Foo*)**, void (%struct.Foo*)*** %0, align 8, !tbaa !2
-  %1 = load void (%struct.Foo*)*, void (%struct.Foo*)** %vtable, align 8
-  call void %1(%struct.Foo* %f)
+  %vtable = load ptr, ptr %f, align 8, !tbaa !2
+  %0 = load ptr, ptr %vtable, align 8
+  call void %0(ptr %f)
   ret void
 }
 

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/sanitize-thread-no-checking.ll b/llvm/test/Instrumentation/ThreadSanitizer/sanitize-thread-no-checking.ll
index 5a5b21ff924d0..dbee198736569 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/sanitize-thread-no-checking.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/sanitize-thread-no-checking.ll
@@ -3,32 +3,32 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-unknown-linux-gnu"
 
-define i32 @"\01-[NoCalls dealloc]"(i32* %a) "sanitize_thread_no_checking_at_run_time" {
+define i32 @"\01-[NoCalls dealloc]"(ptr %a) "sanitize_thread_no_checking_at_run_time" {
 entry:
-  %tmp1 = load i32, i32* %a, align 4
+  %tmp1 = load i32, ptr %a, align 4
   ret i32 %tmp1
 }
 
-; CHECK: define i32 @"\01-[NoCalls dealloc]"(i32* %a)
+; CHECK: define i32 @"\01-[NoCalls dealloc]"(ptr %a)
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   %tmp1 = load i32, i32* %a, align 4
+; CHECK-NEXT:   %tmp1 = load i32, ptr %a, align 4
 ; CHECK-NEXT:   ret i32 %tmp1
 
 declare void @"foo"() nounwind
 
-define i32 @"\01-[WithCalls dealloc]"(i32* %a) "sanitize_thread_no_checking_at_run_time" {
+define i32 @"\01-[WithCalls dealloc]"(ptr %a) "sanitize_thread_no_checking_at_run_time" {
 entry:
-  %tmp1 = load i32, i32* %a, align 4
+  %tmp1 = load i32, ptr %a, align 4
   call void @foo()
   ret i32 %tmp1
 }
 
-; CHECK: define i32 @"\01-[WithCalls dealloc]"(i32* %a)
+; CHECK: define i32 @"\01-[WithCalls dealloc]"(ptr %a)
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   %0 = call i8* @llvm.returnaddress(i32 0)
-; CHECK-NEXT:   call void @__tsan_func_entry(i8* %0)
+; CHECK-NEXT:   %0 = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:   call void @__tsan_func_entry(ptr %0)
 ; CHECK-NEXT:   call void @__tsan_ignore_thread_begin()
-; CHECK-NEXT:   %tmp1 = load i32, i32* %a, align 4
+; CHECK-NEXT:   %tmp1 = load i32, ptr %a, align 4
 ; CHECK-NEXT:   call void @foo()
 ; CHECK-NEXT:   call void @__tsan_ignore_thread_end()
 ; CHECK-NEXT:   call void @__tsan_func_exit()

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/str-nobuiltin.ll b/llvm/test/Instrumentation/ThreadSanitizer/str-nobuiltin.ll
index 96b501e30732c..ada360e1150d4 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/str-nobuiltin.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/str-nobuiltin.ll
@@ -4,13 +4,13 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-unknown-linux-gnu"
 
-declare i8* @memchr(i8* %a, i32 %b, i64 %c) nounwind
-declare i32 @memcmp(i8* %a, i8* %b, i64 %c) nounwind
-declare i32 @strcmp(i8* %a, i8* %b) nounwind
-declare i8* @strcpy(i8* %a, i8* %b) nounwind
-declare i8* @stpcpy(i8* %a, i8* %b) nounwind
-declare i64 @strlen(i8* %a) nounwind
-declare i64 @strnlen(i8* %a, i64 %b) nounwind
+declare ptr @memchr(ptr %a, i32 %b, i64 %c) nounwind
+declare i32 @memcmp(ptr %a, ptr %b, i64 %c) nounwind
+declare i32 @strcmp(ptr %a, ptr %b) nounwind
+declare ptr @strcpy(ptr %a, ptr %b) nounwind
+declare ptr @stpcpy(ptr %a, ptr %b) nounwind
+declare i64 @strlen(ptr %a) nounwind
+declare i64 @strnlen(ptr %a, i64 %b) nounwind
 
 ; CHECK: call{{.*}}@memchr{{.*}} #[[ATTR:[0-9]+]]
 ; CHECK: call{{.*}}@memcmp{{.*}} #[[ATTR]]
@@ -21,13 +21,13 @@ declare i64 @strnlen(i8* %a, i64 %b) nounwind
 ; CHECK: call{{.*}}@strnlen{{.*}} #[[ATTR]]
 ; attributes #[[ATTR]] = { nobuiltin }
 
-define void @f1(i8* %a, i8* %b) nounwind uwtable sanitize_thread {
-  tail call i8* @memchr(i8* %a, i32 1, i64 12)
-  tail call i32 @memcmp(i8* %a, i8* %b, i64 12)
-  tail call i32 @strcmp(i8* %a, i8* %b)
-  tail call i8* @strcpy(i8* %a, i8* %b)
-  tail call i8* @stpcpy(i8* %a, i8* %b)
-  tail call i64 @strlen(i8* %a)
-  tail call i64 @strnlen(i8* %a, i64 12)
+define void @f1(ptr %a, ptr %b) nounwind uwtable sanitize_thread {
+  tail call ptr @memchr(ptr %a, i32 1, i64 12)
+  tail call i32 @memcmp(ptr %a, ptr %b, i64 12)
+  tail call i32 @strcmp(ptr %a, ptr %b)
+  tail call ptr @strcpy(ptr %a, ptr %b)
+  tail call ptr @stpcpy(ptr %a, ptr %b)
+  tail call i64 @strlen(ptr %a)
+  tail call i64 @strnlen(ptr %a, i64 12)
   ret void
 }

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/tsan-vs-gvn.ll b/llvm/test/Instrumentation/ThreadSanitizer/tsan-vs-gvn.ll
index 08439e9f415db..22b7f94999921 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/tsan-vs-gvn.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/tsan-vs-gvn.ll
@@ -10,11 +10,11 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
 
 ; Accessing bytes 4 and 6, not ok to widen to i32 if sanitize_thread is set.
 
-define i32 @test_widening_bad(i8* %P) nounwind ssp noredzone sanitize_thread {
+define i32 @test_widening_bad(ptr %P) nounwind ssp noredzone sanitize_thread {
 entry:
-  %tmp = load i8, i8* getelementptr inbounds (%struct_of_8_bytes_4_aligned, %struct_of_8_bytes_4_aligned* @f, i64 0, i32 1), align 4
+  %tmp = load i8, ptr getelementptr inbounds (%struct_of_8_bytes_4_aligned, ptr @f, i64 0, i32 1), align 4
   %conv = zext i8 %tmp to i32
-  %tmp1 = load i8, i8* getelementptr inbounds (%struct_of_8_bytes_4_aligned, %struct_of_8_bytes_4_aligned* @f, i64 0, i32 3), align 1
+  %tmp1 = load i8, ptr getelementptr inbounds (%struct_of_8_bytes_4_aligned, ptr @f, i64 0, i32 3), align 1
   %conv2 = zext i8 %tmp1 to i32
   %add = add nsw i32 %conv, %conv2
   ret i32 %add

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/tsan_address_space_attr.ll b/llvm/test/Instrumentation/ThreadSanitizer/tsan_address_space_attr.ll
index f12e152c0ef43..fe6744e437baa 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/tsan_address_space_attr.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/tsan_address_space_attr.ll
@@ -5,28 +5,28 @@ target triple = "x86_64-apple-macosx10.11.0"
 ; Checks that we do not instrument loads and stores comming from custom address space.
 ; These result in crashing the compiler.
 ; int foo(int argc, const char * argv[]) {
-;   void *__attribute__((address_space(256))) *gs_base = (((void * __attribute__((address_space(256))) *)0));
-;   void *somevalue = gs_base[-1];
+;   ptr__attribute__((address_space(256))) *gs_base = (((ptr __attribute__((address_space(256))) *)0));
+;   ptr somevalue = gs_base[-1];
 ;   return somevalue;
 ; }
 
-define i32 @foo(i32 %argc, i8** %argv) sanitize_thread {
+define i32 @foo(i32 %argc, ptr %argv) sanitize_thread {
 entry:
   %retval = alloca i32, align 4
   %argc.addr = alloca i32, align 4
-  %argv.addr = alloca i8**, align 8
-  %gs_base = alloca i8* addrspace(256)*, align 8
-  %somevalue = alloca i8*, align 8
-  store i32 0, i32* %retval, align 4
-  store i32 %argc, i32* %argc.addr, align 4
-  store i8** %argv, i8*** %argv.addr, align 8
-  store i8* addrspace(256)* null, i8* addrspace(256)** %gs_base, align 8
-  %0 = load i8* addrspace(256)*, i8* addrspace(256)** %gs_base, align 8
-  %arrayidx = getelementptr inbounds i8*, i8* addrspace(256)* %0, i64 -1
-  %1 = load i8*, i8* addrspace(256)* %arrayidx, align 8
-  store i8* %1, i8** %somevalue, align 8
-  %2 = load i8*, i8** %somevalue, align 8
-  %3 = ptrtoint i8* %2 to i32
+  %argv.addr = alloca ptr, align 8
+  %gs_base = alloca ptr addrspace(256), align 8
+  %somevalue = alloca ptr, align 8
+  store i32 0, ptr %retval, align 4
+  store i32 %argc, ptr %argc.addr, align 4
+  store ptr %argv, ptr %argv.addr, align 8
+  store ptr addrspace(256) null, ptr %gs_base, align 8
+  %0 = load ptr addrspace(256), ptr %gs_base, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr addrspace(256) %0, i64 -1
+  %1 = load ptr, ptr addrspace(256) %arrayidx, align 8
+  store ptr %1, ptr %somevalue, align 8
+  %2 = load ptr, ptr %somevalue, align 8
+  %3 = ptrtoint ptr %2 to i32
   ret i32 %3
 }
 ; CHECK-NOT: call void @__tsan_read

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/tsan_basic.ll b/llvm/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
index d59efd976a7ad..60a423835f21f 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
@@ -3,75 +3,74 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-unknown-linux-gnu"
 
-define i32 @read_4_bytes(i32* %a) sanitize_thread {
+define i32 @read_4_bytes(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load i32, i32* %a, align 4
+  %tmp1 = load i32, ptr %a, align 4
   ret i32 %tmp1
 }
 
-; CHECK: @llvm.used = appending global [1 x i8*] [i8* bitcast (void ()* @tsan.module_ctor to i8*)]
+; CHECK: @llvm.used = appending global [1 x ptr] [ptr @tsan.module_ctor]
 ; CHECK: @llvm.global_ctors = {{.*}}@tsan.module_ctor
 
-; CHECK: define i32 @read_4_bytes(i32* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i32* %a to i8*
-; CHECK-NEXT:   call void @__tsan_read4(i8* %1)
-; CHECK-NEXT:   %tmp1 = load i32, i32* %a, align 4
+; CHECK: define i32 @read_4_bytes(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_read4(ptr %a)
+; CHECK-NEXT:   %tmp1 = load i32, ptr %a, align 4
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i32
 
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
-declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
-declare void @llvm.memset.inline.p0i8.i64(i8* nocapture, i8, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1)
 
 
 ; Check that tsan converts mem intrinsics back to function calls.
 
-define void @MemCpyTest(i8* nocapture %x, i8* nocapture %y) sanitize_thread {
+define void @MemCpyTest(ptr nocapture %x, ptr nocapture %y) sanitize_thread {
 entry:
-    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
+    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %x, ptr align 4 %y, i64 16, i1 false)
     ret void
 ; CHECK: define void @MemCpyTest
-; CHECK: call i8* @__tsan_memcpy
+; CHECK: call ptr @__tsan_memcpy
 ; CHECK: ret void
 }
 
-define void @MemCpyInlineTest(i8* nocapture %x, i8* nocapture %y) sanitize_thread {
+define void @MemCpyInlineTest(ptr nocapture %x, ptr nocapture %y) sanitize_thread {
 entry:
-    tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
+    tail call void @llvm.memcpy.inline.p0.p0.i64(ptr align 4 %x, ptr align 4 %y, i64 16, i1 false)
     ret void
 ; CHECK: define void @MemCpyInlineTest
-; CHECK: call i8* @__tsan_memcpy
+; CHECK: call ptr @__tsan_memcpy
 ; CHECK: ret void
 }
 
-define void @MemMoveTest(i8* nocapture %x, i8* nocapture %y) sanitize_thread {
+define void @MemMoveTest(ptr nocapture %x, ptr nocapture %y) sanitize_thread {
 entry:
-    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %x, i8* align 4 %y, i64 16, i1 false)
+    tail call void @llvm.memmove.p0.p0.i64(ptr align 4 %x, ptr align 4 %y, i64 16, i1 false)
     ret void
 ; CHECK: define void @MemMoveTest
-; CHECK: call i8* @__tsan_memmove
+; CHECK: call ptr @__tsan_memmove
 ; CHECK: ret void
 }
 
-define void @MemSetTest(i8* nocapture %x) sanitize_thread {
+define void @MemSetTest(ptr nocapture %x) sanitize_thread {
 entry:
-    tail call void @llvm.memset.p0i8.i64(i8* align 4 %x, i8 77, i64 16, i1 false)
+    tail call void @llvm.memset.p0.i64(ptr align 4 %x, i8 77, i64 16, i1 false)
     ret void
 ; CHECK: define void @MemSetTest
-; CHECK: call i8* @__tsan_memset
+; CHECK: call ptr @__tsan_memset
 ; CHECK: ret void
 }
 
-define void @MemSetInlineTest(i8* nocapture %x) sanitize_thread {
+define void @MemSetInlineTest(ptr nocapture %x) sanitize_thread {
 entry:
-    tail call void @llvm.memset.inline.p0i8.i64(i8* align 4 %x, i8 77, i64 16, i1 false)
+    tail call void @llvm.memset.inline.p0.i64(ptr align 4 %x, i8 77, i64 16, i1 false)
     ret void
 ; CHECK: define void @MemSetInlineTest
-; CHECK: call i8* @__tsan_memset
+; CHECK: call ptr @__tsan_memset
 ; CHECK: ret void
 }
 
@@ -79,12 +78,12 @@ entry:
 ; CHECK-NOT: __tsan_read
 ; CHECK-NOT: __tsan_write
 ; CHECK: ret
-define void @SwiftError(i8** swifterror) sanitize_thread {
-  %swifterror_ptr_value = load i8*, i8** %0
-  store i8* null, i8** %0
-  %swifterror_addr = alloca swifterror i8*
-  %swifterror_ptr_value_2 = load i8*, i8** %swifterror_addr
-  store i8* null, i8** %swifterror_addr
+define void @SwiftError(ptr swifterror) sanitize_thread {
+  %swifterror_ptr_value = load ptr, ptr %0
+  store ptr null, ptr %0
+  %swifterror_addr = alloca swifterror ptr
+  %swifterror_ptr_value_2 = load ptr, ptr %swifterror_addr
+  store ptr null, ptr %swifterror_addr
   ret void
 }
 
@@ -92,20 +91,20 @@ define void @SwiftError(i8** swifterror) sanitize_thread {
 ; CHECK-NOT: __tsan_read
 ; CHECK-NOT: __tsan_write
 ; CHECK: ret
-define void @SwiftErrorCall(i8** swifterror) sanitize_thread {
-  %swifterror_addr = alloca swifterror i8*
-  store i8* null, i8** %0
-  call void @SwiftError(i8** %0)
+define void @SwiftErrorCall(ptr swifterror) sanitize_thread {
+  %swifterror_addr = alloca swifterror ptr
+  store ptr null, ptr %0
+  call void @SwiftError(ptr %0)
   ret void
 }
 
-; CHECK-LABEL: @NakedTest(i32* %a)
+; CHECK-LABEL: @NakedTest(ptr %a)
 ; CHECK-NEXT:   call void @foo()
-; CHECK-NEXT:   %tmp1 = load i32, i32* %a, align 4
+; CHECK-NEXT:   %tmp1 = load i32, ptr %a, align 4
 ; CHECK-NEXT:   ret i32 %tmp1
-define i32 @NakedTest(i32* %a) naked sanitize_thread {
+define i32 @NakedTest(ptr %a) naked sanitize_thread {
   call void @foo()
-  %tmp1 = load i32, i32* %a, align 4
+  %tmp1 = load i32, ptr %a, align 4
   ret i32 %tmp1
 }
 

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/tsan_musttail.ll b/llvm/test/Instrumentation/ThreadSanitizer/tsan_musttail.ll
index 6f11c643572d6..5e56aa2d11068 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/tsan_musttail.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/tsan_musttail.ll
@@ -1,30 +1,30 @@
 ; To test that __tsan_func_exit always happen before musttaill call and no exception handling code.
 ; RUN: opt < %s -passes=tsan -S | FileCheck %s
 
-define internal i32 @preallocated_musttail(i32* preallocated(i32) %p) sanitize_thread {
-  %rv = load i32, i32* %p
+define internal i32 @preallocated_musttail(ptr preallocated(i32) %p) sanitize_thread {
+  %rv = load i32, ptr %p
   ret i32 %rv
 }
 
-define i32 @call_preallocated_musttail(i32* preallocated(i32) %a) sanitize_thread {
-  %r = musttail call i32 @preallocated_musttail(i32* preallocated(i32) %a)
+define i32 @call_preallocated_musttail(ptr preallocated(i32) %a) sanitize_thread {
+  %r = musttail call i32 @preallocated_musttail(ptr preallocated(i32) %a)
   ret i32 %r
 }
 
-; CHECK-LABEL:  define i32 @call_preallocated_musttail(i32* preallocated(i32) %a) 
+; CHECK-LABEL:  define i32 @call_preallocated_musttail(ptr preallocated(i32) %a) 
 ; CHECK:          call void @__tsan_func_exit()
-; CHECK-NEXT:     %r = musttail call i32 @preallocated_musttail(i32* preallocated(i32) %a)
+; CHECK-NEXT:     %r = musttail call i32 @preallocated_musttail(ptr preallocated(i32) %a)
 ; CHECK-NEXT:     ret i32 %r
 
 
-define i32 @call_preallocated_musttail_cast(i32* preallocated(i32) %a) sanitize_thread {
-  %r = musttail call i32 @preallocated_musttail(i32* preallocated(i32) %a)
+define i32 @call_preallocated_musttail_cast(ptr preallocated(i32) %a) sanitize_thread {
+  %r = musttail call i32 @preallocated_musttail(ptr preallocated(i32) %a)
   %t = bitcast i32 %r to i32
   ret i32 %t
 }
 
-; CHECK-LABEL:  define i32 @call_preallocated_musttail_cast(i32* preallocated(i32) %a)
+; CHECK-LABEL:  define i32 @call_preallocated_musttail_cast(ptr preallocated(i32) %a)
 ; CHECK:          call void @__tsan_func_exit()
-; CHECK-NEXT:     %r = musttail call i32 @preallocated_musttail(i32* preallocated(i32) %a)
+; CHECK-NEXT:     %r = musttail call i32 @preallocated_musttail(ptr preallocated(i32) %a)
 ; CHECK-NEXT:     %t = bitcast i32 %r to i32
 ; CHECK-NEXT:     ret i32 %t

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/unaligned.ll b/llvm/test/Instrumentation/ThreadSanitizer/unaligned.ll
index 8190c21069ed3..e3b8c225a977c 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/unaligned.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/unaligned.ll
@@ -2,142 +2,132 @@
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
-define i16 @test_unaligned_read2(i16* %a) sanitize_thread {
+define i16 @test_unaligned_read2(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load i16, i16* %a, align 1
+  %tmp1 = load i16, ptr %a, align 1
   ret i16 %tmp1
 }
 
-; CHECK-LABEL: define i16 @test_unaligned_read2(i16* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i16* %a to i8*
-; CHECK-NEXT:   call void @__tsan_unaligned_read2(i8* %1)
-; CHECK-NEXT:   %tmp1 = load i16, i16* %a, align 1
+; CHECK-LABEL: define i16 @test_unaligned_read2(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_unaligned_read2(ptr %a)
+; CHECK-NEXT:   %tmp1 = load i16, ptr %a, align 1
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i16
 
-define i32 @test_unaligned_read4(i32* %a) sanitize_thread {
+define i32 @test_unaligned_read4(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load i32, i32* %a, align 2
+  %tmp1 = load i32, ptr %a, align 2
   ret i32 %tmp1
 }
 
-; CHECK-LABEL: define i32 @test_unaligned_read4(i32* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i32* %a to i8*
-; CHECK-NEXT:   call void @__tsan_unaligned_read4(i8* %1)
-; CHECK-NEXT:   %tmp1 = load i32, i32* %a, align 2
+; CHECK-LABEL: define i32 @test_unaligned_read4(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_unaligned_read4(ptr %a)
+; CHECK-NEXT:   %tmp1 = load i32, ptr %a, align 2
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i32
 
-define i64 @test_unaligned_read8(i64* %a) sanitize_thread {
+define i64 @test_unaligned_read8(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load i64, i64* %a, align 4
+  %tmp1 = load i64, ptr %a, align 4
   ret i64 %tmp1
 }
 
-; CHECK-LABEL: define i64 @test_unaligned_read8(i64* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i64* %a to i8*
-; CHECK-NEXT:   call void @__tsan_unaligned_read8(i8* %1)
-; CHECK-NEXT:   %tmp1 = load i64, i64* %a, align 4
+; CHECK-LABEL: define i64 @test_unaligned_read8(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_unaligned_read8(ptr %a)
+; CHECK-NEXT:   %tmp1 = load i64, ptr %a, align 4
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i64
 
-define i128 @test_unaligned_read16(i128* %a) sanitize_thread {
+define i128 @test_unaligned_read16(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load i128, i128* %a, align 1
+  %tmp1 = load i128, ptr %a, align 1
   ret i128 %tmp1
 }
 
-; CHECK-LABEL: define i128 @test_unaligned_read16(i128* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i128* %a to i8*
-; CHECK-NEXT:   call void @__tsan_unaligned_read16(i8* %1)
-; CHECK-NEXT:   %tmp1 = load i128, i128* %a, align 1
+; CHECK-LABEL: define i128 @test_unaligned_read16(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_unaligned_read16(ptr %a)
+; CHECK-NEXT:   %tmp1 = load i128, ptr %a, align 1
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i128
 
-define i128 @test_aligned_read16(i128* %a) sanitize_thread {
+define i128 @test_aligned_read16(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load i128, i128* %a, align 8
+  %tmp1 = load i128, ptr %a, align 8
   ret i128 %tmp1
 }
 
-; CHECK-LABEL: define i128 @test_aligned_read16(i128* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i128* %a to i8*
-; CHECK-NEXT:   call void @__tsan_read16(i8* %1)
-; CHECK-NEXT:   %tmp1 = load i128, i128* %a, align 8
+; CHECK-LABEL: define i128 @test_aligned_read16(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_read16(ptr %a)
+; CHECK-NEXT:   %tmp1 = load i128, ptr %a, align 8
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i128
 
-define void @test_unaligned_write2(i16* %a) sanitize_thread {
+define void @test_unaligned_write2(ptr %a) sanitize_thread {
 entry:
-  store i16 1, i16* %a, align 1
+  store i16 1, ptr %a, align 1
   ret void
 }
 
-; CHECK-LABEL: define void @test_unaligned_write2(i16* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i16* %a to i8*
-; CHECK-NEXT:   call void @__tsan_unaligned_write2(i8* %1)
-; CHECK-NEXT:   store i16 1, i16* %a, align 1
+; CHECK-LABEL: define void @test_unaligned_write2(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_unaligned_write2(ptr %a)
+; CHECK-NEXT:   store i16 1, ptr %a, align 1
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret void
 
-define void @test_unaligned_write4(i32* %a) sanitize_thread {
+define void @test_unaligned_write4(ptr %a) sanitize_thread {
 entry:
-  store i32 1, i32* %a, align 1
+  store i32 1, ptr %a, align 1
   ret void
 }
 
-; CHECK-LABEL: define void @test_unaligned_write4(i32* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i32* %a to i8*
-; CHECK-NEXT:   call void @__tsan_unaligned_write4(i8* %1)
-; CHECK-NEXT:   store i32 1, i32* %a, align 1
+; CHECK-LABEL: define void @test_unaligned_write4(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_unaligned_write4(ptr %a)
+; CHECK-NEXT:   store i32 1, ptr %a, align 1
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret void
 
-define void @test_unaligned_write8(i64* %a) sanitize_thread {
+define void @test_unaligned_write8(ptr %a) sanitize_thread {
 entry:
-  store i64 1, i64* %a, align 1
+  store i64 1, ptr %a, align 1
   ret void
 }
 
-; CHECK-LABEL: define void @test_unaligned_write8(i64* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i64* %a to i8*
-; CHECK-NEXT:   call void @__tsan_unaligned_write8(i8* %1)
-; CHECK-NEXT:   store i64 1, i64* %a, align 1
+; CHECK-LABEL: define void @test_unaligned_write8(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_unaligned_write8(ptr %a)
+; CHECK-NEXT:   store i64 1, ptr %a, align 1
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret void
 
-define void @test_unaligned_write16(i128* %a) sanitize_thread {
+define void @test_unaligned_write16(ptr %a) sanitize_thread {
 entry:
-  store i128 1, i128* %a, align 1
+  store i128 1, ptr %a, align 1
   ret void
 }
 
-; CHECK-LABEL: define void @test_unaligned_write16(i128* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i128* %a to i8*
-; CHECK-NEXT:   call void @__tsan_unaligned_write16(i8* %1)
-; CHECK-NEXT:   store i128 1, i128* %a, align 1
+; CHECK-LABEL: define void @test_unaligned_write16(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_unaligned_write16(ptr %a)
+; CHECK-NEXT:   store i128 1, ptr %a, align 1
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret void
 
-define void @test_aligned_write16(i128* %a) sanitize_thread {
+define void @test_aligned_write16(ptr %a) sanitize_thread {
 entry:
-  store i128 1, i128* %a, align 8
+  store i128 1, ptr %a, align 8
   ret void
 }
 
-; CHECK-LABEL: define void @test_aligned_write16(i128* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i128* %a to i8*
-; CHECK-NEXT:   call void @__tsan_write16(i8* %1)
-; CHECK-NEXT:   store i128 1, i128* %a, align 8
+; CHECK-LABEL: define void @test_aligned_write16(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_write16(ptr %a)
+; CHECK-NEXT:   store i128 1, ptr %a, align 8
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret void

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/volatile.ll b/llvm/test/Instrumentation/ThreadSanitizer/volatile.ll
index 61179935e0100..4c88d1d44065f 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/volatile.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/volatile.ll
@@ -2,174 +2,162 @@
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
-define i16 @test_volatile_read2(i16* %a) sanitize_thread {
+define i16 @test_volatile_read2(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load volatile i16, i16* %a, align 2
+  %tmp1 = load volatile i16, ptr %a, align 2
   ret i16 %tmp1
 }
 
-; CHECK-LABEL: define i16 @test_volatile_read2(i16* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i16* %a to i8*
-; CHECK-NEXT:   call void @__tsan_volatile_read2(i8* %1)
-; CHECK-NEXT:   %tmp1 = load volatile i16, i16* %a, align 2
+; CHECK-LABEL: define i16 @test_volatile_read2(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_volatile_read2(ptr %a)
+; CHECK-NEXT:   %tmp1 = load volatile i16, ptr %a, align 2
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i16
 
-define i32 @test_volatile_read4(i32* %a) sanitize_thread {
+define i32 @test_volatile_read4(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load volatile i32, i32* %a, align 4
+  %tmp1 = load volatile i32, ptr %a, align 4
   ret i32 %tmp1
 }
 
-; CHECK-LABEL: define i32 @test_volatile_read4(i32* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i32* %a to i8*
-; CHECK-NEXT:   call void @__tsan_volatile_read4(i8* %1)
-; CHECK-NEXT:   %tmp1 = load volatile i32, i32* %a, align 4
+; CHECK-LABEL: define i32 @test_volatile_read4(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_volatile_read4(ptr %a)
+; CHECK-NEXT:   %tmp1 = load volatile i32, ptr %a, align 4
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i32
 
-define i64 @test_volatile_read8(i64* %a) sanitize_thread {
+define i64 @test_volatile_read8(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load volatile i64, i64* %a, align 8
+  %tmp1 = load volatile i64, ptr %a, align 8
   ret i64 %tmp1
 }
 
-; CHECK-LABEL: define i64 @test_volatile_read8(i64* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i64* %a to i8*
-; CHECK-NEXT:   call void @__tsan_volatile_read8(i8* %1)
-; CHECK-NEXT:   %tmp1 = load volatile i64, i64* %a, align 8
+; CHECK-LABEL: define i64 @test_volatile_read8(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_volatile_read8(ptr %a)
+; CHECK-NEXT:   %tmp1 = load volatile i64, ptr %a, align 8
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i64
 
-define i128 @test_volatile_read16(i128* %a) sanitize_thread {
+define i128 @test_volatile_read16(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load volatile i128, i128* %a, align 16
+  %tmp1 = load volatile i128, ptr %a, align 16
   ret i128 %tmp1
 }
 
-; CHECK-LABEL: define i128 @test_volatile_read16(i128* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i128* %a to i8*
-; CHECK-NEXT:   call void @__tsan_volatile_read16(i8* %1)
-; CHECK-NEXT:   %tmp1 = load volatile i128, i128* %a, align 16
+; CHECK-LABEL: define i128 @test_volatile_read16(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_volatile_read16(ptr %a)
+; CHECK-NEXT:   %tmp1 = load volatile i128, ptr %a, align 16
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i128
 
-define void @test_volatile_write2(i16* %a) sanitize_thread {
+define void @test_volatile_write2(ptr %a) sanitize_thread {
 entry:
-  store volatile i16 1, i16* %a, align 2
+  store volatile i16 1, ptr %a, align 2
   ret void
 }
 
-; CHECK-LABEL: define void @test_volatile_write2(i16* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i16* %a to i8*
-; CHECK-NEXT:   call void @__tsan_volatile_write2(i8* %1)
-; CHECK-NEXT:   store volatile i16 1, i16* %a, align 2
+; CHECK-LABEL: define void @test_volatile_write2(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_volatile_write2(ptr %a)
+; CHECK-NEXT:   store volatile i16 1, ptr %a, align 2
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret void
 
-define void @test_volatile_write4(i32* %a) sanitize_thread {
+define void @test_volatile_write4(ptr %a) sanitize_thread {
 entry:
-  store volatile i32 1, i32* %a, align 4
+  store volatile i32 1, ptr %a, align 4
   ret void
 }
 
-; CHECK-LABEL: define void @test_volatile_write4(i32* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i32* %a to i8*
-; CHECK-NEXT:   call void @__tsan_volatile_write4(i8* %1)
-; CHECK-NEXT:   store volatile i32 1, i32* %a, align 4
+; CHECK-LABEL: define void @test_volatile_write4(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_volatile_write4(ptr %a)
+; CHECK-NEXT:   store volatile i32 1, ptr %a, align 4
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret void
 
-define void @test_volatile_write8(i64* %a) sanitize_thread {
+define void @test_volatile_write8(ptr %a) sanitize_thread {
 entry:
-  store volatile i64 1, i64* %a, align 8
+  store volatile i64 1, ptr %a, align 8
   ret void
 }
 
-; CHECK-LABEL: define void @test_volatile_write8(i64* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i64* %a to i8*
-; CHECK-NEXT:   call void @__tsan_volatile_write8(i8* %1)
-; CHECK-NEXT:   store volatile i64 1, i64* %a, align 8
+; CHECK-LABEL: define void @test_volatile_write8(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_volatile_write8(ptr %a)
+; CHECK-NEXT:   store volatile i64 1, ptr %a, align 8
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret void
 
-define void @test_volatile_write16(i128* %a) sanitize_thread {
+define void @test_volatile_write16(ptr %a) sanitize_thread {
 entry:
-  store volatile i128 1, i128* %a, align 16
+  store volatile i128 1, ptr %a, align 16
   ret void
 }
 
-; CHECK-LABEL: define void @test_volatile_write16(i128* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i128* %a to i8*
-; CHECK-NEXT:   call void @__tsan_volatile_write16(i8* %1)
-; CHECK-NEXT:   store volatile i128 1, i128* %a, align 16
+; CHECK-LABEL: define void @test_volatile_write16(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_volatile_write16(ptr %a)
+; CHECK-NEXT:   store volatile i128 1, ptr %a, align 16
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret void
 
 ; Check unaligned volatile accesses
 
-define i32 @test_unaligned_read4(i32* %a) sanitize_thread {
+define i32 @test_unaligned_read4(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load volatile i32, i32* %a, align 2
+  %tmp1 = load volatile i32, ptr %a, align 2
   ret i32 %tmp1
 }
 
-; CHECK-LABEL: define i32 @test_unaligned_read4(i32* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i32* %a to i8*
-; CHECK-NEXT:   call void @__tsan_unaligned_volatile_read4(i8* %1)
-; CHECK-NEXT:   %tmp1 = load volatile i32, i32* %a, align 2
+; CHECK-LABEL: define i32 @test_unaligned_read4(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_unaligned_volatile_read4(ptr %a)
+; CHECK-NEXT:   %tmp1 = load volatile i32, ptr %a, align 2
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i32
 
-define void @test_unaligned_write4(i32* %a) sanitize_thread {
+define void @test_unaligned_write4(ptr %a) sanitize_thread {
 entry:
-  store volatile i32 1, i32* %a, align 1
+  store volatile i32 1, ptr %a, align 1
   ret void
 }
 
-; CHECK-LABEL: define void @test_unaligned_write4(i32* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i32* %a to i8*
-; CHECK-NEXT:   call void @__tsan_unaligned_volatile_write4(i8* %1)
-; CHECK-NEXT:   store volatile i32 1, i32* %a, align 1
+; CHECK-LABEL: define void @test_unaligned_write4(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_unaligned_volatile_write4(ptr %a)
+; CHECK-NEXT:   store volatile i32 1, ptr %a, align 1
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret void
 
 ; Check that regular aligned accesses are unaffected
 
-define i32 @test_read4(i32* %a) sanitize_thread {
+define i32 @test_read4(ptr %a) sanitize_thread {
 entry:
-  %tmp1 = load i32, i32* %a, align 4
+  %tmp1 = load i32, ptr %a, align 4
   ret i32 %tmp1
 }
 
-; CHECK-LABEL: define i32 @test_read4(i32* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i32* %a to i8*
-; CHECK-NEXT:   call void @__tsan_read4(i8* %1)
-; CHECK-NEXT:   %tmp1 = load i32, i32* %a, align 4
+; CHECK-LABEL: define i32 @test_read4(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_read4(ptr %a)
+; CHECK-NEXT:   %tmp1 = load i32, ptr %a, align 4
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret i32
 
-define void @test_write4(i32* %a) sanitize_thread {
+define void @test_write4(ptr %a) sanitize_thread {
 entry:
-  store i32 1, i32* %a, align 4
+  store i32 1, ptr %a, align 4
   ret void
 }
 
-; CHECK-LABEL: define void @test_write4(i32* %a)
-; CHECK:        call void @__tsan_func_entry(i8* %0)
-; CHECK-NEXT:   %1 = bitcast i32* %a to i8*
-; CHECK-NEXT:   call void @__tsan_write4(i8* %1)
-; CHECK-NEXT:   store i32 1, i32* %a, align 4
+; CHECK-LABEL: define void @test_write4(ptr %a)
+; CHECK:        call void @__tsan_func_entry(ptr %0)
+; CHECK-NEXT:   call void @__tsan_write4(ptr %a)
+; CHECK-NEXT:   store i32 1, ptr %a, align 4
 ; CHECK-NEXT:   call void @__tsan_func_exit()
 ; CHECK: ret void

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/vptr_read.ll b/llvm/test/Instrumentation/ThreadSanitizer/vptr_read.ll
index 4737cdd98389e..2b0ceea4df49b 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/vptr_read.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/vptr_read.ll
@@ -2,10 +2,10 @@
 ; Check that vptr reads are treated in a special way.
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
-define i8 @Foo(i8* %a) nounwind uwtable sanitize_thread {
+define i8 @Foo(ptr %a) nounwind uwtable sanitize_thread {
 entry:
 ; CHECK: call void @__tsan_vptr_read
-  %0 = load i8, i8* %a, align 8, !tbaa !0
+  %0 = load i8, ptr %a, align 8, !tbaa !0
   ret i8 %0
 }
 !0 = !{!2, !2, i64 0}

diff  --git a/llvm/test/Instrumentation/ThreadSanitizer/vptr_update.ll b/llvm/test/Instrumentation/ThreadSanitizer/vptr_update.ll
index 961d622a2e899..e222e87fa7dbf 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/vptr_update.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/vptr_update.ll
@@ -2,21 +2,21 @@
 ; Check that vtable pointer updates are treated in a special way.
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
-define void @Foo(i8** nocapture %a, i8* %b) nounwind uwtable sanitize_thread {
+define void @Foo(ptr nocapture %a, ptr %b) nounwind uwtable sanitize_thread {
 entry:
 ; CHECK-LABEL: @Foo
 ; CHECK: call void @__tsan_vptr_update
 ; CHECK: ret void
-  store i8* %b, i8** %a, align 8, !tbaa !0
+  store ptr %b, ptr %a, align 8, !tbaa !0
   ret void
 }
 
-define void @FooInt(i64* nocapture %a, i64 %b) nounwind uwtable sanitize_thread {
+define void @FooInt(ptr nocapture %a, i64 %b) nounwind uwtable sanitize_thread {
 entry:
 ; CHECK-LABEL: @FooInt
 ; CHECK: call void @__tsan_vptr_update
 ; CHECK: ret void
-  store i64 %b, i64* %a, align 8, !tbaa !0
+  store i64 %b, ptr %a, align 8, !tbaa !0
   ret void
 }
 
@@ -25,13 +25,13 @@ declare i32 @Func1()
 declare i32 @Func2()
 
 ; Test that we properly handle vector stores marked as vtable updates.
-define void @VectorVptrUpdate(<2 x i8*>* nocapture %a, i8* %b) nounwind uwtable sanitize_thread {
+define void @VectorVptrUpdate(ptr nocapture %a, ptr %b) nounwind uwtable sanitize_thread {
 entry:
 ; CHECK-LABEL: @VectorVptrUpdate
 ; CHECK: call void @__tsan_vptr_update{{.*}}Func1
 ; CHECK-NOT: call void @__tsan_vptr_update
 ; CHECK: ret void
-  store <2 x i8 *> <i8* bitcast(i32 ()* @Func1 to i8 *), i8* bitcast(i32 ()* @Func2 to i8 *)>,  <2 x i8 *>* %a, align 8, !tbaa !0
+  store <2 x ptr> <ptr @Func1, ptr @Func2>,  ptr %a, align 8, !tbaa !0
   ret void
 }
 


        


More information about the llvm-commits mailing list