[clang] a208742 - [Clang] Convert some tests to opaque pointers (NFC)

Nikita Popov via cfe-commits <cfe-commits@lists.llvm.org>
Fri Mar 10 05:43:52 PST 2023


Author: Nikita Popov
Date: 2023-03-10T14:43:42+01:00
New Revision: a20874276be777bed4f34c52438efd66798c2ec5

URL: https://github.com/llvm/llvm-project/commit/a20874276be777bed4f34c52438efd66798c2ec5
DIFF: https://github.com/llvm/llvm-project/commit/a20874276be777bed4f34c52438efd66798c2ec5.diff

LOG: [Clang] Convert some tests to opaque pointers (NFC)
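
A minimal sketch of what this conversion looks like in LLVM IR (a
hypothetical function for illustration, not taken from any of the tests
below): with typed pointers (the pre-LLVM-15 syntax selected by the
-no-opaque-pointers flag being removed here), every pointer operation
spells out a pointee type, so the FileCheck lines had to match it; with
opaque pointers, every pointer is just "ptr".

    ; Typed-pointer form, as the old CHECK lines matched it:
    define i32 @load_example(i32* %p) {
      %v = load i32, i32* %p
      ret i32 %v
    }

    ; Opaque-pointer form, as the updated CHECK lines match it:
    define i32 @load_example(ptr %p) {
      %v = load i32, ptr %p
      ret i32 %v
    }

Pointer-to-pointer bitcasts (including constant expressions such as
"i8* bitcast (... to i8*)") become no-ops and vanish from the output,
which is why several CHECK/CHECK-NEXT sequences below lose a line.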

Added: 
    

Modified: 
    clang/test/CodeGen/catch-undef-behavior.c
    clang/test/CodeGen/code-coverage-tsan.c
    clang/test/CodeGen/compound-literal.c
    clang/test/CodeGen/const-init.c
    clang/test/CodeGen/debug-info-block-decl.c
    clang/test/CodeGen/exceptions-seh-finally.c
    clang/test/CodeGen/extend-arg-64.c
    clang/test/CodeGen/hexagon-linux-vararg.c
    clang/test/CodeGen/ifunc.c
    clang/test/CodeGen/incomplete-function-type-2.c
    clang/test/CodeGen/init-memset.c
    clang/test/CodeGen/init.c
    clang/test/CodeGen/mangle-blocks.c
    clang/test/CodeGen/matrix-cast.c
    clang/test/CodeGen/matrix-type-operators.c
    clang/test/CodeGen/matrix-type.c
    clang/test/CodeGen/mingw-long-double.c
    clang/test/CodeGen/mips-inline-asm-modifiers.c
    clang/test/CodeGen/mips-varargs.c
    clang/test/CodeGen/ms-intrinsics.c
    clang/test/CodeGen/no-bitfield-type-align.c
    clang/test/CodeGen/no-builtin.cpp
    clang/test/CodeGen/packed-nest-unpacked.c
    clang/test/CodeGen/partial-reinitialization2.c
    clang/test/CodeGen/semantic-interposition.c
    clang/test/CodeGen/sparc-vaarg.c
    clang/test/CodeGen/sparcv9-dwarf.c
    clang/test/CodeGen/staticinit.c
    clang/test/CodeGen/temporary-lifetime-exceptions.cpp
    clang/test/CodeGen/union-tbaa1.c
    clang/test/CodeGen/volatile.c
    clang/test/CodeGen/windows-swiftcall.c

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/catch-undef-behavior.c b/clang/test/CodeGen/catch-undef-behavior.c
index 9f511d54f458c..ca0df0f002f89 100644
--- a/clang/test/CodeGen/catch-undef-behavior.c
+++ b/clang/test/CodeGen/catch-undef-behavior.c
@@ -1,6 +1,6 @@
-// RUN: %clang_cc1 -no-opaque-pointers -fsanitize=alignment,null,object-size,shift-base,shift-exponent,return,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize-recover=alignment,null,object-size,shift-base,shift-exponent,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-UBSAN
-// RUN: %clang_cc1 -no-opaque-pointers -fsanitize-trap=alignment,null,object-size,shift-base,shift-exponent,return,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize-recover=alignment,null,object-size,shift-base,shift-exponent,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize=alignment,null,object-size,shift-base,shift-exponent,return,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize-recover=alignment,null,object-size,shift-base,shift-exponent,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-TRAP
-// RUN: %clang_cc1 -no-opaque-pointers -fsanitize=signed-integer-overflow -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK-OVERFLOW
+// RUN: %clang_cc1 -fsanitize=alignment,null,object-size,shift-base,shift-exponent,return,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize-recover=alignment,null,object-size,shift-base,shift-exponent,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-UBSAN
+// RUN: %clang_cc1 -fsanitize-trap=alignment,null,object-size,shift-base,shift-exponent,return,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize-recover=alignment,null,object-size,shift-base,shift-exponent,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize=alignment,null,object-size,shift-base,shift-exponent,return,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -fsanitize-recover=alignment,null,object-size,shift-base,shift-exponent,signed-integer-overflow,vla-bound,float-cast-overflow,integer-divide-by-zero,bool,returns-nonnull-attribute,nonnull-attribute -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-TRAP
+// RUN: %clang_cc1 -fsanitize=signed-integer-overflow -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK-OVERFLOW
 
 // CHECK-UBSAN: @[[INT:.*]] = private unnamed_addr constant { i16, i16, [6 x i8] } { i16 0, i16 11, [6 x i8] c"'int'\00" }
 
@@ -32,15 +32,14 @@
 void foo(void) {
   union { int i; } u;
 
-  // CHECK-COMMON:      %[[I8PTR:.*]] = bitcast i32* %[[PTR:.*]] to i8*
-  // CHECK-COMMON-NEXT: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0i8(i8* %[[I8PTR]], i1 false, i1 false, i1 false)
+  // CHECK-COMMON: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0(ptr %[[PTR:.*]], i1 false, i1 false, i1 false)
   // CHECK-COMMON-NEXT: %[[OK:.*]] = icmp uge i64 %[[SIZE]], 4
 
   // CHECK-UBSAN: br i1 %[[OK]], {{.*}} !prof ![[WEIGHT_MD:.*]], !nosanitize
   // CHECK-TRAP:  br i1 %[[OK]], {{.*}}
 
   // CHECK-UBSAN:      %[[ARG:.*]] = ptrtoint {{.*}} %[[PTR]] to i64
-  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_type_mismatch_v1(i8* bitcast ({{.*}} @[[LINE_100]] to i8*), i64 %[[ARG]])
+  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_type_mismatch_v1(ptr @[[LINE_100]], i64 %[[ARG]])
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 22) [[NR_NUW:#[0-9]+]]
   // CHECK-TRAP-NEXT: unreachable
@@ -57,7 +56,7 @@ int bar(int *a) {
   // CHECK-COMMON-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRINT]], 3
   // CHECK-COMMON-NEXT: icmp eq i64 %[[MISALIGN]], 0
 
-  // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1(i8* bitcast ({{.*}} @[[LINE_200]] to i8*), i64 %[[PTRINT]])
+  // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1(ptr @[[LINE_200]], i64 %[[PTRINT]])
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 22) [[NR_NUW]]
   // CHECK-TRAP-NEXT: unreachable
@@ -92,7 +91,7 @@ int lsh_overflow(int a, int b) {
 
   // CHECK-UBSAN:      %[[ARG1:.*]] = zext
   // CHECK-UBSAN-NEXT: %[[ARG2:.*]] = zext
-  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_shift_out_of_bounds(i8* bitcast ({{.*}} @[[LINE_300]] to i8*), i64 %[[ARG1]], i64 %[[ARG2]])
+  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_shift_out_of_bounds(ptr @[[LINE_300]], i64 %[[ARG1]], i64 %[[ARG2]])
   // CHECK-UBSAN-NOT:  call void @__ubsan_handle_shift_out_of_bounds
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 20) [[NR_NUW]]
@@ -112,7 +111,7 @@ int rsh_inbounds(int a, int b) {
 
   // CHECK-UBSAN:      %[[ARG1:.*]] = zext
   // CHECK-UBSAN-NEXT: %[[ARG2:.*]] = zext
-  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_shift_out_of_bounds(i8* bitcast ({{.*}} @[[LINE_400]] to i8*), i64 %[[ARG1]], i64 %[[ARG2]])
+  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_shift_out_of_bounds(ptr @[[LINE_400]], i64 %[[ARG1]], i64 %[[ARG2]])
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 20) [[NR_NUW]]
   // CHECK-TRAP-NEXT: unreachable
@@ -125,7 +124,7 @@ int rsh_inbounds(int a, int b) {
 
 // CHECK-COMMON-LABEL: @load
 int load(int *p) {
-  // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1(i8* bitcast ({{.*}} @[[LINE_500]] to i8*), i64 %{{.*}})
+  // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1(ptr @[[LINE_500]], i64 %{{.*}})
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 22) [[NR_NUW]]
   // CHECK-TRAP-NEXT: unreachable
@@ -135,7 +134,7 @@ int load(int *p) {
 
 // CHECK-COMMON-LABEL: @store
 void store(int *p, int q) {
-  // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1(i8* bitcast ({{.*}} @[[LINE_600]] to i8*), i64 %{{.*}})
+  // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1(ptr @[[LINE_600]], i64 %{{.*}})
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 22) [[NR_NUW]]
   // CHECK-TRAP-NEXT: unreachable
@@ -147,7 +146,7 @@ struct S { int k; };
 
 // CHECK-COMMON-LABEL: @member_access
 int *member_access(struct S *p) {
-  // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1(i8* bitcast ({{.*}} @[[LINE_700]] to i8*), i64 %{{.*}})
+  // CHECK-UBSAN: call void @__ubsan_handle_type_mismatch_v1(ptr @[[LINE_700]], i64 %{{.*}})
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 22) [[NR_NUW]]
   // CHECK-TRAP-NEXT: unreachable
@@ -159,7 +158,7 @@ int *member_access(struct S *p) {
 int signed_overflow(int a, int b) {
   // CHECK-UBSAN:      %[[ARG1:.*]] = zext
   // CHECK-UBSAN-NEXT: %[[ARG2:.*]] = zext
-  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_add_overflow(i8* bitcast ({{.*}} @[[LINE_800]] to i8*), i64 %[[ARG1]], i64 %[[ARG2]])
+  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_add_overflow(ptr @[[LINE_800]], i64 %[[ARG1]], i64 %[[ARG2]])
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 0) [[NR_NUW]]
   // CHECK-TRAP-NEXT: unreachable
@@ -182,7 +181,7 @@ void vla_bound(int n) {
   // CHECK-UBSAN:      icmp sgt i32 %[[PARAM:.*]], 0
   //
   // CHECK-UBSAN:      %[[ARG:.*]] = zext i32 %[[PARAM]] to i64
-  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_vla_bound_not_positive(i8* bitcast ({{.*}} @[[LINE_900]] to i8*), i64 %[[ARG]])
+  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_vla_bound_not_positive(ptr @[[LINE_900]], i64 %[[ARG]])
 #line 900
   int arr[n * 3];
 }
@@ -192,7 +191,7 @@ void vla_bound_unsigned(unsigned int n) {
   // CHECK-UBSAN:      icmp ugt i32 %[[PARAM:.*]], 0
   //
   // CHECK-UBSAN:      %[[ARG:.*]] = zext i32 %[[PARAM]] to i64
-  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_vla_bound_not_positive(i8* bitcast ({{.*}} @[[LINE_1000]] to i8*), i64 %[[ARG]])
+  // CHECK-UBSAN-NEXT: call void @__ubsan_handle_vla_bound_not_positive(ptr @[[LINE_1000]], i64 %[[ARG]])
 #line 1000
   int arr[n * 3];
 }
@@ -227,7 +226,7 @@ int float_int_overflow(float f) {
 
   // CHECK-UBSAN: %[[CAST:.*]] = bitcast float %[[F]] to i32
   // CHECK-UBSAN: %[[ARG:.*]] = zext i32 %[[CAST]] to i64
-  // CHECK-UBSAN: call void @__ubsan_handle_float_cast_overflow(i8* bitcast ({{.*}} @[[LINE_1200]] to i8*), i64 %[[ARG]]
+  // CHECK-UBSAN: call void @__ubsan_handle_float_cast_overflow(ptr @[[LINE_1200]], i64 %[[ARG]]
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 5) [[NR_NUW]]
   // CHECK-TRAP-NEXT: unreachable
@@ -244,9 +243,9 @@ int long_double_int_overflow(long double ld) {
   // CHECK-COMMON: %[[INBOUNDS:.*]] = and i1 %[[GE]], %[[LE]]
   // CHECK-COMMON-NEXT: br i1 %[[INBOUNDS]]
 
-  // CHECK-UBSAN: store x86_fp80 %[[F]], x86_fp80* %[[ALLOCA:.*]], align 16, !nosanitize
-  // CHECK-UBSAN: %[[ARG:.*]] = ptrtoint x86_fp80* %[[ALLOCA]] to i64
-  // CHECK-UBSAN: call void @__ubsan_handle_float_cast_overflow(i8* bitcast ({{.*}} @[[LINE_1300]] to i8*), i64 %[[ARG]]
+  // CHECK-UBSAN: store x86_fp80 %[[F]], ptr %[[ALLOCA:.*]], align 16, !nosanitize
+  // CHECK-UBSAN: %[[ARG:.*]] = ptrtoint ptr %[[ALLOCA]] to i64
+  // CHECK-UBSAN: call void @__ubsan_handle_float_cast_overflow(ptr @[[LINE_1300]], i64 %[[ARG]]
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 5) [[NR_NUW]]
   // CHECK-TRAP-NEXT: unreachable
@@ -261,7 +260,7 @@ unsigned float_uint_overflow(float f) {
   // CHECK-COMMON: %[[INBOUNDS:.*]] = and i1 %[[GE]], %[[LE]]
   // CHECK-COMMON-NEXT: br i1 %[[INBOUNDS]]
 
-  // CHECK-UBSAN: call void @__ubsan_handle_float_cast_overflow(i8* bitcast ({{.*}} @[[LINE_1400]] to i8*),
+  // CHECK-UBSAN: call void @__ubsan_handle_float_cast_overflow(ptr @[[LINE_1400]],
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 5) [[NR_NUW]]
   // CHECK-TRAP-NEXT: unreachable
@@ -276,7 +275,7 @@ signed char fp16_char_overflow(__fp16 *p) {
   // CHECK-COMMON: %[[INBOUNDS:.*]] = and i1 %[[GE]], %[[LE]]
   // CHECK-COMMON-NEXT: br i1 %[[INBOUNDS]]
 
-  // CHECK-UBSAN: call void @__ubsan_handle_float_cast_overflow(i8* bitcast ({{.*}} @[[LINE_1500]] to i8*),
+  // CHECK-UBSAN: call void @__ubsan_handle_float_cast_overflow(ptr @[[LINE_1500]],
 
   // CHECK-TRAP:      call void @llvm.ubsantrap(i8 5) [[NR_NUW]]
   // CHECK-TRAP-NEXT: unreachable
@@ -322,7 +321,7 @@ _Bool sour_bool(_Bool *p) {
   // CHECK-COMMON: %[[OK:.*]] = icmp ule i8 {{.*}}, 1
   // CHECK-COMMON: br i1 %[[OK]]
 
-  // CHECK-UBSAN: call void @__ubsan_handle_load_invalid_value(i8* bitcast ({{.*}}), i64 {{.*}})
+  // CHECK-UBSAN: call void @__ubsan_handle_load_invalid_value(ptr {{.*}}, i64 {{.*}})
 
   // CHECK-TRAP: call void @llvm.ubsantrap(i8 10) [[NR_NUW]]
   // CHECK-TRAP: unreachable
@@ -332,7 +331,7 @@ _Bool sour_bool(_Bool *p) {
 // CHECK-COMMON-LABEL: @ret_nonnull
 __attribute__((returns_nonnull))
 int *ret_nonnull(int *a) {
-  // CHECK-COMMON: [[OK:%.*]] = icmp ne i32* {{.*}}, null
+  // CHECK-COMMON: [[OK:%.*]] = icmp ne ptr {{.*}}, null
   // CHECK-COMMON: br i1 [[OK]]
 
   // CHECK-UBSAN: call void @__ubsan_handle_nonnull_return
@@ -345,7 +344,7 @@ int *ret_nonnull(int *a) {
 // CHECK-COMMON-LABEL: @call_decl_nonnull
 __attribute__((nonnull)) void decl_nonnull(int *a);
 void call_decl_nonnull(int *a) {
-  // CHECK-COMMON: [[OK:%.*]] = icmp ne i32* {{.*}}, null
+  // CHECK-COMMON: [[OK:%.*]] = icmp ne ptr {{.*}}, null
   // CHECK-COMMON: br i1 [[OK]]
 
   // CHECK-UBSAN: call void @__ubsan_handle_nonnull_arg
@@ -359,11 +358,11 @@ extern void *memcpy (void *, const void *, unsigned) __attribute__((nonnull(1, 2
 
 // CHECK-COMMON-LABEL: @call_memcpy_nonnull
 void call_memcpy_nonnull(void *p, void *q, int sz) {
-  // CHECK-COMMON: icmp ne i8* {{.*}}, null
+  // CHECK-COMMON: icmp ne ptr {{.*}}, null
   // CHECK-UBSAN: call void @__ubsan_handle_nonnull_arg
   // CHECK-TRAP: call void @llvm.ubsantrap(i8 16)
 
-  // CHECK-COMMON: icmp ne i8* {{.*}}, null
+  // CHECK-COMMON: icmp ne ptr {{.*}}, null
   // CHECK-UBSAN: call void @__ubsan_handle_nonnull_arg
   // CHECK-TRAP: call void @llvm.ubsantrap(i8 16)
   memcpy(p, q, sz);
@@ -373,11 +372,11 @@ extern void *memmove (void *, const void *, unsigned) __attribute__((nonnull(1,
 
 // CHECK-COMMON-LABEL: @call_memmove_nonnull
 void call_memmove_nonnull(void *p, void *q, int sz) {
-  // CHECK-COMMON: icmp ne i8* {{.*}}, null
+  // CHECK-COMMON: icmp ne ptr {{.*}}, null
   // CHECK-UBSAN: call void @__ubsan_handle_nonnull_arg
   // CHECK-TRAP: call void @llvm.ubsantrap(i8 16)
 
-  // CHECK-COMMON: icmp ne i8* {{.*}}, null
+  // CHECK-COMMON: icmp ne ptr {{.*}}, null
   // CHECK-UBSAN: call void @__ubsan_handle_nonnull_arg
   // CHECK-TRAP: call void @llvm.ubsantrap(i8 16)
   memmove(p, q, sz);
@@ -386,7 +385,7 @@ void call_memmove_nonnull(void *p, void *q, int sz) {
 // CHECK-COMMON-LABEL: @call_nonnull_variadic
 __attribute__((nonnull)) void nonnull_variadic(int a, ...);
 void call_nonnull_variadic(int a, int *b) {
-  // CHECK-COMMON: [[OK:%.*]] = icmp ne i32* {{.*}}, null
+  // CHECK-COMMON: [[OK:%.*]] = icmp ne ptr {{.*}}, null
   // CHECK-COMMON: br i1 [[OK]]
 
   // CHECK-UBSAN: call void @__ubsan_handle_nonnull_arg

diff --git a/clang/test/CodeGen/code-coverage-tsan.c b/clang/test/CodeGen/code-coverage-tsan.c
index 827a746d153fc..c7928fd505e0c 100644
--- a/clang/test/CodeGen/code-coverage-tsan.c
+++ b/clang/test/CodeGen/code-coverage-tsan.c
@@ -1,12 +1,12 @@
 /// -fprofile-update=atomic (implied by -fsanitize=thread) requires the
 /// (potentially concurrent) counter updates to be atomic.
-// RUN: %clang_cc1 -no-opaque-pointers %s -triple x86_64 -emit-llvm -fprofile-update=atomic -ftest-coverage -fprofile-arcs \
+// RUN: %clang_cc1 %s -triple x86_64 -emit-llvm -fprofile-update=atomic -ftest-coverage -fprofile-arcs \
 // RUN:   -coverage-notes-file /dev/null -coverage-data-file /dev/null -o - | FileCheck %s
 
 // CHECK-LABEL: void @foo()
 /// Two counters are incremented by __tsan_atomic64_fetch_add.
-// CHECK:         atomicrmw add i64* {{.*}} @__llvm_gcov_ctr{{.*}} monotonic, align 8
-// CHECK-NEXT:    atomicrmw sub i32*
+// CHECK:         atomicrmw add ptr @__llvm_gcov_ctr{{.*}} monotonic, align 8
+// CHECK-NEXT:    atomicrmw sub ptr
 
 _Atomic(int) cnt;
 void foo(void) { cnt--; }

diff --git a/clang/test/CodeGen/compound-literal.c b/clang/test/CodeGen/compound-literal.c
index 967e3fb53dcf9..ecc2c73edfb1e 100644
--- a/clang/test/CodeGen/compound-literal.c
+++ b/clang/test/CodeGen/compound-literal.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm %s -o - | FileCheck %s
 
 // Capture the type and name so matching later is cleaner.
 struct CompoundTy { int a; };
@@ -28,17 +28,15 @@ void f(void) {
   // CHECK: [[S:%[a-zA-Z0-9.]+]] = alloca [[STRUCT:%[a-zA-Z0-9.]+]],
   struct S s;
   // CHECK-NEXT: [[COMPOUNDLIT:%[a-zA-Z0-9.]+]] = alloca [[STRUCT]]
-  // CHECK-NEXT: [[CX:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], [[STRUCT]]* [[COMPOUNDLIT]], i32 0, i32 0
-  // CHECK-NEXT: [[SY:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], [[STRUCT]]* [[S]], i32 0, i32 1
-  // CHECK-NEXT: [[TMP:%[a-zA-Z0-9.]+]] = load i32, i32* [[SY]]
-  // CHECK-NEXT: store i32 [[TMP]], i32* [[CX]]
-  // CHECK-NEXT: [[CY:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], [[STRUCT]]* [[COMPOUNDLIT]], i32 0, i32 1
-  // CHECK-NEXT: [[SX:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], [[STRUCT]]* [[S]], i32 0, i32 0
-  // CHECK-NEXT: [[TMP:%[a-zA-Z0-9.]+]] = load i32, i32* [[SX]]
-  // CHECK-NEXT: store i32 [[TMP]], i32* [[CY]]
-  // CHECK-NEXT: [[SI8:%[a-zA-Z0-9.]+]] = bitcast [[STRUCT]]* [[S]] to i8*
-  // CHECK-NEXT: [[COMPOUNDLITI8:%[a-zA-Z0-9.]+]] = bitcast [[STRUCT]]* [[COMPOUNDLIT]] to i8*
-  // CHECK-NEXT: call void @llvm.memcpy{{.*}}(i8* align {{[0-9]+}} [[SI8]], i8* align {{[0-9]+}} [[COMPOUNDLITI8]]
+  // CHECK-NEXT: [[CX:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], ptr [[COMPOUNDLIT]], i32 0, i32 0
+  // CHECK-NEXT: [[SY:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], ptr [[S]], i32 0, i32 1
+  // CHECK-NEXT: [[TMP:%[a-zA-Z0-9.]+]] = load i32, ptr [[SY]]
+  // CHECK-NEXT: store i32 [[TMP]], ptr [[CX]]
+  // CHECK-NEXT: [[CY:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], ptr [[COMPOUNDLIT]], i32 0, i32 1
+  // CHECK-NEXT: [[SX:%[a-zA-Z0-9.]+]] = getelementptr inbounds [[STRUCT]], ptr [[S]], i32 0, i32 0
+  // CHECK-NEXT: [[TMP:%[a-zA-Z0-9.]+]] = load i32, ptr [[SX]]
+  // CHECK-NEXT: store i32 [[TMP]], ptr [[CY]]
+  // CHECK-NEXT: call void @llvm.memcpy{{.*}}(ptr align {{[0-9]+}} [[S]], ptr align {{[0-9]+}} [[COMPOUNDLIT]]
   s = (S){s.y,s.x};
   // CHECK-NEXT: ret void
 }
@@ -56,24 +54,22 @@ struct G g(int x, int y, int z) {
   // CHECK-NEXT: store i32
 
   // Evaluate the compound literal directly in the result value slot.
-  // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[G]], [[G]]* [[RESULT]], i32 0, i32 0
-  // CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[X]], align 4
+  // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[G]], ptr [[RESULT]], i32 0, i32 0
+  // CHECK-NEXT: [[T1:%.*]] = load i32, ptr [[X]], align 4
   // CHECK-NEXT: [[T2:%.*]] = trunc i32 [[T1]] to i16
-  // CHECK-NEXT: store i16 [[T2]], i16* [[T0]], align 2
-  // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[G]], [[G]]* [[RESULT]], i32 0, i32 1
-  // CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[Y]], align 4
+  // CHECK-NEXT: store i16 [[T2]], ptr [[T0]], align 2
+  // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[G]], ptr [[RESULT]], i32 0, i32 1
+  // CHECK-NEXT: [[T1:%.*]] = load i32, ptr [[Y]], align 4
   // CHECK-NEXT: [[T2:%.*]] = trunc i32 [[T1]] to i16
-  // CHECK-NEXT: store i16 [[T2]], i16* [[T0]], align 2
-  // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[G]], [[G]]* [[RESULT]], i32 0, i32 2
-  // CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[Z]], align 4
+  // CHECK-NEXT: store i16 [[T2]], ptr [[T0]], align 2
+  // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[G]], ptr [[RESULT]], i32 0, i32 2
+  // CHECK-NEXT: [[T1:%.*]] = load i32, ptr [[Z]], align 4
   // CHECK-NEXT: [[T2:%.*]] = trunc i32 [[T1]] to i16
-  // CHECK-NEXT: store i16 [[T2]], i16* [[T0]], align 2
+  // CHECK-NEXT: store i16 [[T2]], ptr [[T0]], align 2
   return (struct G) { x, y, z };
 
-  // CHECK-NEXT: [[T0:%.*]] = bitcast i48* [[COERCE_TEMP]] to i8*
-  // CHECK-NEXT: [[T1:%.*]] = bitcast [[G]]* [[RESULT]] to i8*
-  // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align {{[0-9]+}} [[T0]], i8* align {{[0-9]+}} [[T1]], i64 6
-  // CHECK-NEXT: [[T0:%.*]] = load i48, i48* [[COERCE_TEMP]]
+  // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align {{[0-9]+}} [[COERCE_TEMP]], ptr align {{[0-9]+}} [[RESULT]], i64 6
+  // CHECK-NEXT: [[T0:%.*]] = load i48, ptr [[COERCE_TEMP]]
   // CHECK-NEXT: ret i48 [[T0]]
 }
 
@@ -81,9 +77,9 @@ struct G g(int x, int y, int z) {
 // const pointer to a variable initialized by a compound literal.
 // CHECK-LABEL: define{{.*}} i32 @compareMyCLH() #0
 int compareMyCLH(void) {
-  // CHECK: store i8* bitcast ([[MY_CLH]] to i8*)
+  // CHECK: store [[MY_CLH]]
   const void *a = MyCLH;
-  // CHECK: store i8* bitcast ([[MY_CLH]] to i8*)
+  // CHECK: store [[MY_CLH]]
   const void *b = MyCLH;
   return a == b;
 }
@@ -92,7 +88,7 @@ int compareMyCLH(void) {
 // for a local variable.
 // CHECK-LABEL: define{{.*}} i32 @compound_array_fn()
 // CHECK: [[COMPOUND_ARRAY:%.*]] = alloca [8 x i32]
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}, i64 32, i1 false)
+// CHECK: call void @llvm.memcpy.p0.p0.i64({{.*}}, i64 32, i1 false)
 int compound_array_fn(void) {
   int compound_array[] = (int[]){1,2,3,4,5,6,7,8};
   return compound_array[0];

diff --git a/clang/test/CodeGen/const-init.c b/clang/test/CodeGen/const-init.c
index 4748d71dca966..7f6035a2178a8 100644
--- a/clang/test/CodeGen/const-init.c
+++ b/clang/test/CodeGen/const-init.c
@@ -1,6 +1,6 @@
 // setting strict FP behaviour in the run line below tests that the compiler
 // does the right thing for global compound literals (compoundliteral test)
-// RUN: %clang_cc1 -no-opaque-pointers -triple i386-pc-linux-gnu -ffreestanding -Wno-pointer-to-int-cast -Wno-int-conversion -ffp-exception-behavior=strict -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple i386-pc-linux-gnu -ffreestanding -Wno-pointer-to-int-cast -Wno-int-conversion -ffp-exception-behavior=strict -emit-llvm -o - %s | FileCheck %s
 
 #include <stdint.h>
 
@@ -61,24 +61,24 @@ struct {
 } __attribute__((__packed__)) gv1  = { .a = 0x0, .b = 7,  };
 
 // PR5118
-// CHECK: @gv2 ={{.*}} global %struct.anon.0 <{ i8 1, i8* null }>, align 1
+// CHECK: @gv2 ={{.*}} global %struct.anon.0 <{ i8 1, ptr null }>, align 1
 struct {
   unsigned char a;
   char *b;
 } __attribute__((__packed__)) gv2 = { 1, (void*)0 };
 
 // Global references
-// CHECK: @g11.l0 = internal global i32 ptrtoint (i32 ()* @g11 to i32)
+// CHECK: @g11.l0 = internal global i32 ptrtoint (ptr @g11 to i32)
 long g11(void) {
   static long l0 = (long) g11;
   return l0;
 }
 
-// CHECK: @g12 ={{.*}} global i32 ptrtoint (i8* @g12_tmp to i32)
+// CHECK: @g12 ={{.*}} global i32 ptrtoint (ptr @g12_tmp to i32)
 static char g12_tmp;
 long g12 = (long) &g12_tmp;
 
-// CHECK: @g13 ={{.*}} global [1 x %struct.g13_s0] [%struct.g13_s0 { i32 ptrtoint (i8* @g12_tmp to i32) }]
+// CHECK: @g13 ={{.*}} global [1 x %struct.g13_s0] [%struct.g13_s0 { i32 ptrtoint (ptr @g12_tmp to i32) }]
 struct g13_s0 {
    long a;
 };
@@ -86,7 +86,7 @@ struct g13_s0 g13[] = {
    { (long) &g12_tmp }
 };
 
-// CHECK: @g14 ={{.*}} global i8* inttoptr (i32 100 to i8*)
+// CHECK: @g14 ={{.*}} global ptr inttoptr (i32 100 to ptr)
 void *g14 = (void*) 100;
 
 // CHECK: @g15 ={{.*}} global i32 -1
@@ -95,16 +95,16 @@ int g15 = (int) (char) ((void*) 0 + 255);
 // CHECK: @g16 ={{.*}} global i64 4294967295
 long long g16 = (long long) ((void*) 0xFFFFFFFF);
 
-// CHECK: @g17 ={{.*}} global i32* @g15
+// CHECK: @g17 ={{.*}} global ptr @g15
 int *g17 = (int *) ((long) &g15);
 
-// CHECK: @g18.p = internal global [1 x i32*] [i32* @g19]
+// CHECK: @g18.p = internal global [1 x ptr] [ptr @g19]
 void g18(void) {
   extern int g19;
   static int *p[] = { &g19 };
 }
 
-// CHECK: @g20.l0 = internal global %struct.g20_s1 { %struct.g20_s0* null, %struct.g20_s0** getelementptr inbounds (%struct.g20_s1, %struct.g20_s1* @g20.l0, i32 0, i32 0) }
+// CHECK: @g20.l0 = internal global %struct.g20_s1 { ptr null, ptr @g20.l0 }
 struct g20_s0;
 struct g20_s1 {
   struct g20_s0 *f0, **f1;
@@ -123,14 +123,14 @@ struct g22 {int x;} __attribute((packed));
 struct g23 {char a; short b; char c; struct g22 d;};
 struct g23 g24 = {1,2,3,4};
 
-// CHECK: @g25.g26 = internal global i8* getelementptr inbounds ([4 x i8], [4 x i8]* @[[FUNC:.*]], i32 0, i32 0)
+// CHECK: @g25.g26 = internal global ptr @[[FUNC:.*]], align 4
 // CHECK: @[[FUNC]] = private unnamed_addr constant [4 x i8] c"g25\00"
 int g25(void) {
   static const char *g26 = __func__;
   return *g26;
 }
 
-// CHECK: @g27.x = internal global i8* bitcast (i8** @g27.x to i8*), align 4
+// CHECK: @g27.x = internal global ptr @g27.x, align 4
 void g27(void) { // PR8073
   static void *x = &x;
 }
@@ -155,7 +155,7 @@ void g29(void) {
       DCC_PASSWD passwd;
   } DCC_SRVR_NM;
   // CHECK: @g29.a = internal global %struct.DCC_SRVR_NM { [2 x i8] c"@\00" }, align 1
-  // CHECK: @g29.b = internal global [1 x i32] [i32 ptrtoint ([5 x i8]* @.str.1 to i32)], align 4
+  // CHECK: @g29.b = internal global [1 x i32] [i32 ptrtoint (ptr @.str.1 to i32)], align 4
   // CHECK: @g29.c = internal global [1 x i32] [i32 97], align 4
   static DCC_SRVR_NM a = { {"@"} };
   static int b[1] = { "asdf" };

diff --git a/clang/test/CodeGen/debug-info-block-decl.c b/clang/test/CodeGen/debug-info-block-decl.c
index 553cb60c72cc6..d94c2e6338c2b 100644
--- a/clang/test/CodeGen/debug-info-block-decl.c
+++ b/clang/test/CodeGen/debug-info-block-decl.c
@@ -1,9 +1,9 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin10 -debug-info-kind=limited -fblocks -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -debug-info-kind=limited -fblocks -emit-llvm -o - %s | FileCheck %s
 // Assignment and block entry should point to the same line.
 // rdar://problem/14039866
 
 // CHECK: define{{.*}}@main()
-// CHECK: store{{.*}}bitcast{{.*}}, !dbg ![[ASSIGNMENT:[0-9]+]]
+// CHECK: store {{.*}}, !dbg ![[ASSIGNMENT:[0-9]+]]
 // CHECK: define {{.*}} @__main_block_invoke
 // CHECK: , !dbg ![[BLOCK_ENTRY:[0-9]+]]
 

diff --git a/clang/test/CodeGen/exceptions-seh-finally.c b/clang/test/CodeGen/exceptions-seh-finally.c
index 6a09a04568f3d..8ef5ca807b1d3 100644
--- a/clang/test/CodeGen/exceptions-seh-finally.c
+++ b/clang/test/CodeGen/exceptions-seh-finally.c
@@ -1,6 +1,6 @@
-// RUN: %clang_cc1 -no-opaque-pointers %s -triple x86_64-pc-win32 -fms-extensions -emit-llvm -O1 -disable-llvm-passes -o - | FileCheck %s
-// RUN: %clang_cc1 -no-opaque-pointers %s -triple i686-pc-win32 -fms-extensions -emit-llvm -O1 -disable-llvm-passes -o - | FileCheck %s
-// RUN: %clang_cc1 -no-opaque-pointers %s -triple aarch64-windows -fms-extensions -emit-llvm -O1 -disable-llvm-passes -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple x86_64-pc-win32 -fms-extensions -emit-llvm -O1 -disable-llvm-passes -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple i686-pc-win32 -fms-extensions -emit-llvm -O1 -disable-llvm-passes -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple aarch64-windows -fms-extensions -emit-llvm -O1 -disable-llvm-passes -o - | FileCheck %s
 // NOTE: we're passing "-O1 -disable-llvm-passes" to avoid adding optnone and noinline everywhere.
 
 void abort(void) __attribute__((noreturn));
@@ -20,14 +20,14 @@ void basic_finally(void) {
 // CHECK:     to label %[[invoke_cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
 //
 // CHECK: [[invoke_cont]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
-// CHECK: call void @"?fin$0 at 0@basic_finally@@"({{i8 noundef( zeroext)?}} 0, i8* noundef %[[fp]])
+// CHECK: %[[fp:[^ ]*]] = call ptr @llvm.localaddress()
+// CHECK: call void @"?fin$0 at 0@basic_finally@@"({{i8 noundef( zeroext)?}} 0, ptr noundef %[[fp]])
 // CHECK-NEXT: ret void
 //
 // CHECK: [[lpad]]
 // CHECK-NEXT: %[[pad:[^ ]*]] = cleanuppad
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
-// CHECK: call void @"?fin$0 at 0@basic_finally@@"({{i8 noundef( zeroext)?}} 1, i8* noundef %[[fp]])
+// CHECK: %[[fp:[^ ]*]] = call ptr @llvm.localaddress()
+// CHECK: call void @"?fin$0 at 0@basic_finally@@"({{i8 noundef( zeroext)?}} 1, ptr noundef %[[fp]])
 // CHECK-NEXT: cleanupret from %[[pad]] unwind to caller
 
 // CHECK: define internal void @"?fin$0 at 0@basic_finally@@"({{.*}})
@@ -60,8 +60,8 @@ void label_in_finally(void) {
 // CHECK:     to label %[[invoke_cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
 //
 // CHECK: [[invoke_cont]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
-// CHECK: call void @"?fin$0 at 0@label_in_finally@@"({{i8 noundef( zeroext)?}} 0, i8* noundef %[[fp]])
+// CHECK: %[[fp:[^ ]*]] = call ptr @llvm.localaddress()
+// CHECK: call void @"?fin$0 at 0@label_in_finally@@"({{i8 noundef( zeroext)?}} 0, ptr noundef %[[fp]])
 // CHECK: ret void
 
 // CHECK: define internal void @"?fin$0 at 0@label_in_finally@@"({{.*}})
@@ -88,20 +88,20 @@ void use_abnormal_termination(void) {
 // CHECK:     to label %[[invoke_cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
 //
 // CHECK: [[invoke_cont]]
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
-// CHECK: call void @"?fin$0 at 0@use_abnormal_termination@@"({{i8 noundef( zeroext)?}} 0, i8* noundef %[[fp]])
+// CHECK: %[[fp:[^ ]*]] = call ptr @llvm.localaddress()
+// CHECK: call void @"?fin$0 at 0@use_abnormal_termination@@"({{i8 noundef( zeroext)?}} 0, ptr noundef %[[fp]])
 // CHECK: ret void
 //
 // CHECK: [[lpad]]
 // CHECK-NEXT: %[[pad:[^ ]*]] = cleanuppad
-// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
-// CHECK: call void @"?fin$0 at 0@use_abnormal_termination@@"({{i8 noundef( zeroext)?}} 1, i8* noundef %[[fp]])
+// CHECK: %[[fp:[^ ]*]] = call ptr @llvm.localaddress()
+// CHECK: call void @"?fin$0 at 0@use_abnormal_termination@@"({{i8 noundef( zeroext)?}} 1, ptr noundef %[[fp]])
 // CHECK-NEXT: cleanupret from %[[pad]] unwind to caller
 
-// CHECK: define internal void @"?fin$0 at 0@use_abnormal_termination@@"({{i8 noundef( zeroext)?}} %[[abnormal:abnormal_termination]], i8* noundef %frame_pointer)
+// CHECK: define internal void @"?fin$0 at 0@use_abnormal_termination@@"({{i8 noundef( zeroext)?}} %[[abnormal:abnormal_termination]], ptr noundef %frame_pointer)
 // CHECK-SAME: [[finally_attrs]]
 // CHECK: %[[abnormal_zext:[^ ]*]] = zext i8 %[[abnormal]] to i32
-// CHECK: store i32 %[[abnormal_zext]], i32* @crashed
+// CHECK: store i32 %[[abnormal_zext]], ptr @crashed
 // CHECK-NEXT: ret void
 
 void noreturn_noop_finally(void) {
@@ -280,7 +280,7 @@ void finally_with_func(void) {
 }
 
 // CHECK-LABEL: define internal void @"?fin$0 at 0@finally_with_func@@"({{[^)]*}})
-// CHECK: call void @cleanup_with_func(i8* noundef getelementptr inbounds ([18 x i8], [18 x i8]* @"??_C@_0BC@COAGBPGM@finally_with_func?$AA@", i{{32|64}} 0, i{{32|64}} 0))
+// CHECK: call void @cleanup_with_func(ptr noundef @"??_C@_0BC@COAGBPGM@finally_with_func?$AA@")
 
 // Look for the absence of noinline.  nounwind is expected; any further
 // attributes should be string attributes.

diff --git a/clang/test/CodeGen/extend-arg-64.c b/clang/test/CodeGen/extend-arg-64.c
index 5f14811db9187..0749523b9ab3d 100644
--- a/clang/test/CodeGen/extend-arg-64.c
+++ b/clang/test/CodeGen/extend-arg-64.c
@@ -1,23 +1,23 @@
-// RUN: %clang_cc1 -no-opaque-pointers -DD128 -triple x86_64-apple-darwin -fextend-arguments=64  \
+// RUN: %clang_cc1 -DD128 -triple x86_64-apple-darwin -fextend-arguments=64  \
 // RUN:            -Wno-strict-prototypes %s -emit-llvm -o - | FileCheck %s -check-prefix=CHECKEXT
 
 // When the option isn't selected, no effect
-// RUN: %clang_cc1 -no-opaque-pointers -DD128 -triple x86_64-apple-darwin  \
+// RUN: %clang_cc1 -DD128 -triple x86_64-apple-darwin  \
 // RUN:            -Wno-strict-prototypes %s -emit-llvm -o - | FileCheck %s \
 // RUN:    --implicit-check-not "ext {{.*}}to i64"
 
 // The option isn't supported on x86, no effect
-// RUN: %clang_cc1 -no-opaque-pointers -triple i386-pc-linux-gnu -fextend-arguments=64 \
+// RUN: %clang_cc1 -triple i386-pc-linux-gnu -fextend-arguments=64 \
 // RUN:            -Wno-strict-prototypes %s -emit-llvm -o - | FileCheck %s \
 // RUN:    --implicit-check-not "ext {{.*}}to i64"
 
 // The option isn't supported on ppc, no effect
-// RUN: %clang_cc1 -no-opaque-pointers -triple ppc64le -fextend-arguments=64 \
+// RUN: %clang_cc1 -triple ppc64le -fextend-arguments=64 \
 // RUN:            -Wno-strict-prototypes %s -emit-llvm -o - | FileCheck %s \
 // RUN:    --implicit-check-not "ext {{.*}}to i64"
 
 // The option isn't supported on ppc, no effect
-// RUN: %clang_cc1 -no-opaque-pointers -DD128 -triple powerpc64-ibm-aix-xcoff -fextend-arguments=64 \
+// RUN: %clang_cc1 -DD128 -triple powerpc64-ibm-aix-xcoff -fextend-arguments=64 \
 // RUN:            -Wno-strict-prototypes %s -emit-llvm -o - | FileCheck %s \
 // RUN:    --implicit-check-not "ext {{.*}}to i64"
 
@@ -42,68 +42,68 @@ __int128 i128;
 int test(void) {
   // CHECK: define{{.*}} i32 @test{{.*}}
 
-  // CHECKEXT:  [[TAG_u32:%.*]] = load i32, i32* @u32{{.*}}
+  // CHECKEXT:  [[TAG_u32:%.*]] = load i32, ptr @u32{{.*}}
   // CHECKEXT: [[CONV_u32:%.*]] = zext i32 [[TAG_u32]] to i64
 
-  // CHECKEXT:  [[TAG_s32:%.*]] = load i32, i32* @s32
+  // CHECKEXT:  [[TAG_s32:%.*]] = load i32, ptr @s32
   // CHECKEXT: [[CONV_s32:%.*]] = sext i32 [[TAG_s32]] to i64
 
-  // CHECKEXT:  [[TAG_u16:%.*]] = load i16, i16* @u16
+  // CHECKEXT:  [[TAG_u16:%.*]] = load i16, ptr @u16
   // CHECKEXT: [[CONV_u16:%.*]] = zext i16 [[TAG_u16]] to i64
 
-  // CHECKEXT:  [[TAG_s16:%.*]] = load i16, i16* @s16
+  // CHECKEXT:  [[TAG_s16:%.*]] = load i16, ptr @s16
   // CHECKEXT: [[CONV_s16:%.*]] = sext i16 [[TAG_s16]] to i64
 
-  // CHECKEXT:  [[TAG_u8:%.*]] = load i8, i8* @u8
+  // CHECKEXT:  [[TAG_u8:%.*]] = load i8, ptr @u8
   // CHECKEXT: [[CONV_u8:%.*]] = zext i8 [[TAG_u8]] to i64
 
-  // CHECKEXT:  [[TAG_s8:%.*]] = load i8, i8* @s8
+  // CHECKEXT:  [[TAG_s8:%.*]] = load i8, ptr @s8
   // CHECKEXT: [[CONV_s8:%.*]] = sext i8 [[TAG_s8]] to i64
   // CHECKEXT: call{{.*}} @vararg(i32 noundef %0, i64 noundef [[CONV_u32]], i64 noundef [[CONV_s32]], i64 noundef [[CONV_u16]], i64 noundef [[CONV_s16]], i64 noundef [[CONV_u8]], i64 noundef [[CONV_s8]]
 
   int sum = 0;
   sum = vararg(sum, u32, s32, u16, s16, u8, s8);
   knr(ll);
-  // CHECKEXT: load i64, i64* @ll
-  // CHECKEXT-NEXT: call void (i64, ...) bitcast {{.*}} @knr
+  // CHECKEXT: load i64, ptr @ll
+  // CHECKEXT-NEXT: call void (i64, ...) @knr
 
   knr(ei23);
-  // CHECKEXT: load i23, i23* @ei23
-  // CHECKEXT-NEXT: call void (i23, ...) bitcast{{.*}} @knr
+  // CHECKEXT: load i23, ptr @ei23
+  // CHECKEXT-NEXT: call void (i23, ...) @knr
 
   knr(ff);
   // CHECKEXT: load float
   // CHECKEXT-NEXT: fpext float {{.*}} to double
-  // CHECKEXT-NEXT: call{{.*}} void (double, ...) bitcast{{.*}} @knr
+  // CHECKEXT-NEXT: call{{.*}} void (double, ...) @knr
 
   knr(dd);
   // CHECKEXT: load double
-  // CHECKEXT-NEXT: call{{.*}} void (double, ...) bitcast{{.*}} @knr
+  // CHECKEXT-NEXT: call{{.*}} void (double, ...) @knr
 
 #ifdef D128
   knr(i128);
   // CHECKEXT: load i128
-  // CHECKEXT: call{{.*}} void (i64, i64, ...) bitcast{{.*}} @knr
+  // CHECKEXT: call{{.*}} void (i64, i64, ...) @knr
 #endif
 
   knr(u32, s32, u16, s16, u8, s8);
-  // CHECKEXT:  [[TAg_u32:%.*]] = load i32, i32* @u32{{.*}}
+  // CHECKEXT:  [[TAg_u32:%.*]] = load i32, ptr @u32{{.*}}
   // CHECKEXT: [[CONv_u32:%.*]] = zext i32 [[TAg_u32]] to i64
 
-  // CHECKEXT:  [[TAg_s32:%.*]] = load i32, i32* @s32
+  // CHECKEXT:  [[TAg_s32:%.*]] = load i32, ptr @s32
   // CHECKEXT: [[CONv_s32:%.*]] = sext i32 [[TAg_s32]] to i64
 
-  // CHECKEXT:  [[TAg_u16:%.*]] = load i16, i16* @u16
+  // CHECKEXT:  [[TAg_u16:%.*]] = load i16, ptr @u16
   // CHECKEXT: [[CONv_u16:%.*]] = zext i16 [[TAg_u16]] to i64
 
-  // CHECKEXT:  [[TAg_s16:%.*]] = load i16, i16* @s16
+  // CHECKEXT:  [[TAg_s16:%.*]] = load i16, ptr @s16
   // CHECKEXT: [[CONv_s16:%.*]] = sext i16 [[TAg_s16]] to i64
 
-  // CHECKEXT:  [[TAg_u8:%.*]] = load i8, i8* @u8
+  // CHECKEXT:  [[TAg_u8:%.*]] = load i8, ptr @u8
   // CHECKEXT: [[CONv_u8:%.*]] = zext i8 [[TAg_u8]] to i64
 
-  // CHECKEXT:  [[TAg_s8:%.*]] = load i8, i8* @s8
+  // CHECKEXT:  [[TAg_s8:%.*]] = load i8, ptr @s8
   // CHECKEXT: [[CONv_s8:%.*]] = sext i8 [[TAg_s8]] to i64
-  // CHECKEXT: call{{.*}} void (i64, i64, i64, i64, i64, i64, ...) bitcast{{.*}} @knr
+  // CHECKEXT: call{{.*}} void (i64, i64, i64, i64, i64, i64, ...) @knr
   return sum;
 }

diff --git a/clang/test/CodeGen/hexagon-linux-vararg.c b/clang/test/CodeGen/hexagon-linux-vararg.c
index ffa949deb6c7b..033e72ab449d3 100644
--- a/clang/test/CodeGen/hexagon-linux-vararg.c
+++ b/clang/test/CodeGen/hexagon-linux-vararg.c
@@ -1,5 +1,5 @@
 // REQUIRES: hexagon-registered-target
-// RUN: %clang_cc1 -no-opaque-pointers -emit-llvm -triple hexagon-unknown-linux-musl %s -o - | FileCheck %s
+// RUN: %clang_cc1 -emit-llvm -triple hexagon-unknown-linux-musl %s -o - | FileCheck %s
 #include <stdarg.h>
 
 struct AAA {
@@ -9,52 +9,50 @@ struct AAA {
   int d;
 };
 
-// CHECK:   call void @llvm.va_start(i8* %arraydecay1)
-// CHECK:   %arraydecay2 = getelementptr inbounds [1 x %struct.__va_list_tag],
-// [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0
+// CHECK:   call void @llvm.va_start(ptr %arraydecay)
+// CHECK:   %arraydecay1 = getelementptr inbounds [1 x %struct.__va_list_tag],
+// ptr %ap, i32 0, i32 0
 // CHECK:   br label %vaarg.maybe_reg
 
 // CHECK: vaarg.maybe_reg:                                  ; preds = %entry
 // CHECK:   %__current_saved_reg_area_pointer_p = getelementptr inbounds
-// %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 0
-// CHECK:   %__current_saved_reg_area_pointer = load i8*, i8**
+// %struct.__va_list_tag, ptr %arraydecay2, i32 0, i32 0
+// CHECK:   %__current_saved_reg_area_pointer = load ptr, ptr
 // %__current_saved_reg_area_pointer_p
 // CHECK:   %__saved_reg_area_end_pointer_p = getelementptr inbounds
-// %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 1
-// CHECK:   %__saved_reg_area_end_pointer = load i8*, i8**
+// %struct.__va_list_tag, ptr %arraydecay2, i32 0, i32 1
+// CHECK:   %__saved_reg_area_end_pointer = load ptr, ptr
 // %__saved_reg_area_end_pointer_p
-// CHECK:   %__new_saved_reg_area_pointer = getelementptr i8, i8*
+// CHECK:   %__new_saved_reg_area_pointer = getelementptr i8, ptr
 // %__current_saved_reg_area_pointer, i32 4
-// CHECK:   %0 = icmp sgt i8* %__new_saved_reg_area_pointer,
+// CHECK:   %0 = icmp sgt ptr %__new_saved_reg_area_pointer,
 // %__saved_reg_area_end_pointer
 // CHECK:   br i1 %0, label %vaarg.on_stack, label %vaarg.in_reg
 
 // CHECK: vaarg.in_reg:                                     ; preds =
 // %vaarg.maybe_reg
-// CHECK:   %1 = bitcast i8* %__current_saved_reg_area_pointer to i32*
-// CHECK:   store i8* %__new_saved_reg_area_pointer, i8**
+// CHECK:   store ptr %__new_saved_reg_area_pointer, ptr
 // %__current_saved_reg_area_pointer_p
 // CHECK:   br label %vaarg.end
 
 // CHECK: vaarg.on_stack:                                   ; preds =
 // %vaarg.maybe_reg
 // CHECK:   %__overflow_area_pointer_p = getelementptr inbounds
-// %struct.__va_list_tag, %struct.__va_list_tag* %arraydecay2, i32 0, i32 2
-// CHECK:   %__overflow_area_pointer = load i8*, i8** %__overflow_area_pointer_p
-// CHECK:   %__overflow_area_pointer.next = getelementptr i8, i8*
+// %struct.__va_list_tag, ptr %arraydecay2, i32 0, i32 2
+// CHECK:   %__overflow_area_pointer = load ptr, ptr %__overflow_area_pointer_p
+// CHECK:   %__overflow_area_pointer.next = getelementptr i8, ptr
 // %__overflow_area_pointer, i32 4
-// CHECK:   store i8* %__overflow_area_pointer.next, i8**
+// CHECK:   store ptr %__overflow_area_pointer.next, ptr
 // %__overflow_area_pointer_p
-// CHECK:   store i8* %__overflow_area_pointer.next, i8**
+// CHECK:   store ptr %__overflow_area_pointer.next, ptr
 // %__current_saved_reg_area_pointer_p
-// CHECK:   %2 = bitcast i8* %__overflow_area_pointer to i32*
 // CHECK:   br label %vaarg.end
 
 // CHECK: vaarg.end:                                        ; preds =
 // %vaarg.on_stack, %vaarg.in_reg
-// CHECK:   %vaarg.addr = phi i32* [ %1, %vaarg.in_reg ], [ %2, %vaarg.on_stack
+// CHECK:   %vaarg.addr = phi ptr [ %__current_saved_reg_area_pointer, %vaarg.in_reg ], [ %__overflow_area_pointer, %vaarg.on_stack
 // ]
-// CHECK:   %3 = load i32, i32* %vaarg.addr
+// CHECK:   %1 = load i32, ptr %vaarg.addr
 
 struct AAA aaa = {100, 200, 300, 400};
 

diff --git a/clang/test/CodeGen/ifunc.c b/clang/test/CodeGen/ifunc.c
index 895141914db65..64f7f3d4ec65c 100644
--- a/clang/test/CodeGen/ifunc.c
+++ b/clang/test/CodeGen/ifunc.c
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple i386-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
-// RUN: %clang_cc1 -no-opaque-pointers -triple i386-unknown-linux-gnu -O2 -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple i386-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple i386-unknown-linux-gnu -O2 -emit-llvm -o - %s | FileCheck %s
 
 int foo(int) __attribute__ ((ifunc("foo_ifunc")));
 
@@ -34,8 +34,8 @@ extern void goo(void) __attribute__ ((ifunc("goo_ifunc")));
 void* goo_ifunc(void) {
   return 0;
 }
-// CHECK: @foo = ifunc i32 (i32), i32 (i32)* ()* @foo_ifunc
-// CHECK: @goo = ifunc void (), bitcast (i8* ()* @goo_ifunc to void ()* ()*)
+// CHECK: @foo = ifunc i32 (i32), ptr @foo_ifunc
+// CHECK: @goo = ifunc void (), ptr @goo_ifunc
 
 // CHECK: call i32 @foo(i32
 // CHECK: call void @goo()

diff --git a/clang/test/CodeGen/incomplete-function-type-2.c b/clang/test/CodeGen/incomplete-function-type-2.c
index 24e2c83d085d9..4abed7eb0ab3f 100644
--- a/clang/test/CodeGen/incomplete-function-type-2.c
+++ b/clang/test/CodeGen/incomplete-function-type-2.c
@@ -1,8 +1,8 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm -o - %s | FileCheck %s
 
 // PR14355: don't crash
 // Keep this test in its own file because CodeGenTypes has global state.
-// CHECK: define{{.*}} void @test10_foo({}* noundef %p1.coerce) [[NUW:#[0-9]+]] {
+// CHECK: define{{.*}} void @test10_foo(ptr noundef %p1) [[NUW:#[0-9]+]] {
 struct test10_B;
 typedef struct test10_B test10_F3(double);
 void test10_foo(test10_F3 p1);

diff --git a/clang/test/CodeGen/init-memset.c b/clang/test/CodeGen/init-memset.c
index e3e041a449592..e5fd86a74a546 100644
--- a/clang/test/CodeGen/init-memset.c
+++ b/clang/test/CodeGen/init-memset.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-unknown -O0 -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -O0 -emit-llvm -o - %s | FileCheck %s
 
 void use(void *);
 
@@ -55,12 +55,6 @@ void test_most_a(void) {
 void test_pointers(void) {
   // CHECK-LABEL: define{{.*}} void @test_pointers()
   void *a[] = {&use, &use, &use, &use, &use, &use};
-  // CHECK: call void @llvm.memset.{{.*}}
-  // CHECK: store i8*
-  // CHECK: store i8*
-  // CHECK: store i8*
-  // CHECK: store i8*
-  // CHECK: store i8*
-  // CHECK: store i8*
+  // CHECK: call void @llvm.memcpy.{{.*}}
   use(a);
 }

diff --git a/clang/test/CodeGen/init.c b/clang/test/CodeGen/init.c
index 74e769a40af20..0a2d739e1a80d 100644
--- a/clang/test/CodeGen/init.c
+++ b/clang/test/CodeGen/init.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s
 
 struct I { int k[3]; };
 struct M { struct I o[2]; };
@@ -88,11 +88,11 @@ const char large_array_with_zeroes[1000] = {
 
 char global;
 
-// CHECK-DAG: @large_array_with_zeroes_2 ={{.*}} global <{ [10 x i8*], [90 x i8*] }> <{ [10 x i8*] [i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* @global], [90 x i8*] zeroinitializer }>
+// CHECK-DAG: @large_array_with_zeroes_2 ={{.*}} global <{ [10 x ptr], [90 x ptr] }> <{ [10 x ptr] [ptr null, ptr null, ptr null, ptr null, ptr null, ptr null, ptr null, ptr null, ptr null, ptr @global], [90 x ptr] zeroinitializer }>
 const void *large_array_with_zeroes_2[100] = {
   [9] = &global
 };
-// CHECK-DAG: @large_array_with_zeroes_3 ={{.*}} global <{ [10 x i8*], [990 x i8*] }> <{ [10 x i8*] [i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* @global], [990 x i8*] zeroinitializer }>
+// CHECK-DAG: @large_array_with_zeroes_3 ={{.*}} global <{ [10 x ptr], [990 x ptr] }> <{ [10 x ptr] [ptr null, ptr null, ptr null, ptr null, ptr null, ptr null, ptr null, ptr null, ptr null, ptr @global], [990 x ptr] zeroinitializer }>
 const void *large_array_with_zeroes_3[1000] = {
   [9] = &global
 };
@@ -103,9 +103,9 @@ char test8(int X) {
   return str[X];
   // CHECK-LABEL: @test8(
   // CHECK: call void @llvm.memset
-  // CHECK: store i8 97, i8* %{{[0-9]*}}, align 1
-  // CHECK: store i8 98, i8* %{{[0-9]*}}, align 1
-  // CHECK: store i8 99, i8* %{{[0-9]*}}, align 1
+  // CHECK: store i8 97, ptr %{{[0-9]*}}, align 1
+  // CHECK: store i8 98, ptr %{{[0-9]*}}, align 1
+  // CHECK: store i8 99, ptr %{{[0-9]*}}, align 1
   // CHECK-NOT: getelementptr
   // CHECK: load
 }
@@ -145,7 +145,7 @@ void nonzeroMemseti8(void) {
   // CHECK-LABEL: @nonzeroMemseti8(
   // CHECK-NOT: store
   // CHECK-NOT: memcpy
-  // CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 42, i32 33, i1 false)
+  // CHECK: call void @llvm.memset.p0.i32(ptr {{.*}}, i8 42, i32 33, i1 false)
 }
 
 void nonzeroMemseti16(void) {
@@ -153,7 +153,7 @@ void nonzeroMemseti16(void) {
   // CHECK-LABEL: @nonzeroMemseti16(
   // CHECK-NOT: store
   // CHECK-NOT: memcpy
-  // CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 66, i32 34, i1 false)
+  // CHECK: call void @llvm.memset.p0.i32(ptr {{.*}}, i8 66, i32 34, i1 false)
 }
 
 void nonzeroMemseti32(void) {
@@ -161,7 +161,7 @@ void nonzeroMemseti32(void) {
   // CHECK-LABEL: @nonzeroMemseti32(
   // CHECK-NOT: store
   // CHECK-NOT: memcpy
-  // CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 -16, i32 36, i1 false)
+  // CHECK: call void @llvm.memset.p0.i32(ptr {{.*}}, i8 -16, i32 36, i1 false)
 }
 
 void nonzeroMemseti64(void) {
@@ -169,7 +169,7 @@ void nonzeroMemseti64(void) {
   // CHECK-LABEL: @nonzeroMemseti64(
   // CHECK-NOT: store
   // CHECK-NOT: memcpy
-  // CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 -86, i32 56, i1 false)
+  // CHECK: call void @llvm.memset.p0.i32(ptr {{.*}}, i8 -86, i32 56, i1 false)
 }
 
 void nonzeroMemsetf32(void) {
@@ -177,7 +177,7 @@ void nonzeroMemsetf32(void) {
   // CHECK-LABEL: @nonzeroMemsetf32(
   // CHECK-NOT: store
   // CHECK-NOT: memcpy
-  // CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 101, i32 36, i1 false)
+  // CHECK: call void @llvm.memset.p0.i32(ptr {{.*}}, i8 101, i32 36, i1 false)
 }
 
 void nonzeroMemsetf64(void) {
@@ -185,7 +185,7 @@ void nonzeroMemsetf64(void) {
   // CHECK-LABEL: @nonzeroMemsetf64(
   // CHECK-NOT: store
   // CHECK-NOT: memcpy
-  // CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 68, i32 56, i1 false)
+  // CHECK: call void @llvm.memset.p0.i32(ptr {{.*}}, i8 68, i32 56, i1 false)
 }
 
 void nonzeroPaddedUnionMemset(void) {
@@ -194,7 +194,7 @@ void nonzeroPaddedUnionMemset(void) {
   // CHECK-LABEL: @nonzeroPaddedUnionMemset(
   // CHECK-NOT: store
   // CHECK-NOT: memcpy
-  // CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 -16, i32 36, i1 false)
+  // CHECK: call void @llvm.memset.p0.i32(ptr {{.*}}, i8 -16, i32 36, i1 false)
 }
 
 void nonzeroNestedMemset(void) {
@@ -204,7 +204,7 @@ void nonzeroNestedMemset(void) {
   // CHECK-LABEL: @nonzeroNestedMemset(
   // CHECK-NOT: store
   // CHECK-NOT: memcpy
-  // CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 -16, i32 40, i1 false)
+  // CHECK: call void @llvm.memset.p0.i32(ptr {{.*}}, i8 -16, i32 40, i1 false)
 }
 
 // PR9257
@@ -214,10 +214,10 @@ struct test11S {
 void test11(struct test11S *P) {
   *P = (struct test11S) { .A = { [0 ... 3] = 4 } };
   // CHECK-LABEL: @test11(
-  // CHECK: store i32 4, i32* %{{.*}}, align 4
-  // CHECK: store i32 4, i32* %{{.*}}, align 4
-  // CHECK: store i32 4, i32* %{{.*}}, align 4
-  // CHECK: store i32 4, i32* %{{.*}}, align 4
+  // CHECK: store i32 4, ptr %{{.*}}, align 4
+  // CHECK: store i32 4, ptr %{{.*}}, align 4
+  // CHECK: store i32 4, ptr %{{.*}}, align 4
+  // CHECK: store i32 4, ptr %{{.*}}, align 4
   // CHECK: ret void
 }
 
@@ -238,9 +238,9 @@ void test13(int x) {
 
 // CHECK-LABEL: @PR20473(
 void PR20473(void) {
-  // CHECK: memcpy{{.*}}getelementptr inbounds ([2 x i8], [2 x i8]* @
+  // CHECK: memcpy{{.*}}
   bar((char[2]) {""});
-  // CHECK: memcpy{{.*}}getelementptr inbounds ([3 x i8], [3 x i8]* @
+  // CHECK: memcpy{{.*}}
   bar((char[3]) {""});
 }
 
@@ -250,7 +250,7 @@ struct S14 { int a[16]; };
 
 void test14(struct S14 *s14) {
   // CHECK-LABEL: @test14(
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 {{.*}}, i8* align 4 {{.*}} [[INIT14]] {{.*}}, i32 64, i1 false)
+  // CHECK: call void @llvm.memcpy.p0.p0.i32(ptr align 4 {{.*}}, ptr align 4 [[INIT14]], i32 64, i1 false)
   // CHECK-NOT: store
   // CHECK: ret void
   *s14 = (struct S14) { { [5 ... 11] = 17 } };

diff --git a/clang/test/CodeGen/mangle-blocks.c b/clang/test/CodeGen/mangle-blocks.c
index 2e671dc880325..c87ee3a8ea9d0 100644
--- a/clang/test/CodeGen/mangle-blocks.c
+++ b/clang/test/CodeGen/mangle-blocks.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple i386-apple-ios -fblocks -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple i386-apple-ios -fblocks -emit-llvm -o - %s | FileCheck %s
 
 void __assert_rtn(const char *, const char *, int, const char *)
     __attribute__ (( noreturn ));
@@ -15,9 +15,9 @@ void (^mangle(void))(void) {
 // CHECK: @.str{{.*}} = private unnamed_addr constant {{.*}}, align 1
 // CHECK: @.str[[STR1:.*]] = private unnamed_addr constant [7 x i8] c"mangle\00", align 1
 
-// CHECK: define internal void @__mangle_block_invoke(i8* noundef %.block_descriptor)
+// CHECK: define internal void @__mangle_block_invoke(ptr noundef %.block_descriptor)
 
-// CHECK: define internal void @__mangle_block_invoke_2(i8* noundef %.block_descriptor){{.*}}{
-// CHECK:   call void @__assert_rtn(i8* noundef getelementptr inbounds ([22 x i8], [22 x i8]* @__func__.__mangle_block_invoke_2, i32 0, i32 0), i8* noundef getelementptr inbounds {{.*}}, i32 noundef 9, i8* noundef getelementptr inbounds ([7 x i8], [7 x i8]* @.str[[STR1]], i32 0, i32 0))
+// CHECK: define internal void @__mangle_block_invoke_2(ptr noundef %.block_descriptor){{.*}}{
+// CHECK:   call void @__assert_rtn(ptr noundef @__func__.__mangle_block_invoke_2, ptr noundef @{{.*}}, i32 noundef 9, ptr noundef @.str[[STR1]])
 // CHECK: }
 

diff --git a/clang/test/CodeGen/matrix-cast.c b/clang/test/CodeGen/matrix-cast.c
index ec0a3f435c9b7..80990b40ea2d0 100644
--- a/clang/test/CodeGen/matrix-cast.c
+++ b/clang/test/CodeGen/matrix-cast.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -fenable-matrix -triple x86_64-apple-darwin %s -emit-llvm -disable-llvm-passes -o - | FileCheck %s
+// RUN: %clang_cc1 -fenable-matrix -triple x86_64-apple-darwin %s -emit-llvm -disable-llvm-passes -o - | FileCheck %s
 
 typedef char cx5x5 __attribute__((matrix_type(5, 5)));
 typedef int ix5x5 __attribute__((matrix_type(5, 5)));
@@ -11,9 +11,9 @@ typedef unsigned long unsigned_long_int_5x5 __attribute__((matrix_type(5, 5)));
 
 void cast_char_matrix_to_int(cx5x5 c, ix5x5 i) {
   // CHECK-LABEL: define{{.*}} void @cast_char_matrix_to_int(<25 x i8> noundef %c, <25 x i32> noundef %i)
-  // CHECK:       [[C:%.*]] = load <25 x i8>, <25 x i8>* {{.*}}, align 1
+  // CHECK:       [[C:%.*]] = load <25 x i8>, ptr {{.*}}, align 1
   // CHECK-NEXT:  [[CONV:%.*]] = sext <25 x i8> [[C]] to <25 x i32>
-  // CHECK-NEXT:  store <25 x i32> [[CONV]], <25 x i32>* {{.*}}, align 4
+  // CHECK-NEXT:  store <25 x i32> [[CONV]], ptr {{.*}}, align 4
   // CHECK-NEXT:  ret void
 
   i = (ix5x5)c;
@@ -21,9 +21,9 @@ void cast_char_matrix_to_int(cx5x5 c, ix5x5 i) {
 
 void cast_char_matrix_to_unsigned_int(cx5x5 c, unsigned_int_5x5 u) {
   // CHECK-LABEL: define{{.*}} void @cast_char_matrix_to_unsigned_int(<25 x i8> noundef %c, <25 x i32> noundef %u)
-  // CHECK:       [[C:%.*]] = load <25 x i8>, <25 x i8>* {{.*}}, align 1
+  // CHECK:       [[C:%.*]] = load <25 x i8>, ptr {{.*}}, align 1
   // CHECK-NEXT:  [[CONV:%.*]] = sext <25 x i8> [[C]] to <25 x i32>
-  // CHECK-NEXT:  store <25 x i32> [[CONV]], <25 x i32>* {{.*}}, align 4
+  // CHECK-NEXT:  store <25 x i32> [[CONV]], ptr {{.*}}, align 4
   // CHECK-NEXT:  ret void
 
   u = (unsigned_int_5x5)c;
@@ -31,9 +31,9 @@ void cast_char_matrix_to_unsigned_int(cx5x5 c, unsigned_int_5x5 u) {
 
 void cast_unsigned_long_int_matrix_to_short(unsigned_long_int_5x5 u, sx5x5 s) {
   // CHECK-LABEL: define{{.*}} void @cast_unsigned_long_int_matrix_to_short(<25 x i64> noundef %u, <25 x i16> noundef %s)
-  // CHECK:       [[U:%.*]] = load <25 x i64>, <25 x i64>* {{.*}}, align 8
+  // CHECK:       [[U:%.*]] = load <25 x i64>, ptr {{.*}}, align 8
   // CHECK-NEXT:  [[CONV:%.*]] = trunc <25 x i64> [[U]] to <25 x i16>
-  // CHECK-NEXT:  store <25 x i16> [[CONV]], <25 x i16>* {{.*}}, align 2
+  // CHECK-NEXT:  store <25 x i16> [[CONV]], ptr {{.*}}, align 2
   // CHECK-NEXT:  ret void
 
   s = (sx5x5)u;
@@ -41,9 +41,9 @@ void cast_unsigned_long_int_matrix_to_short(unsigned_long_int_5x5 u, sx5x5 s) {
 
 void cast_int_matrix_to_short(ix5x5 i, sx5x5 s) {
   // CHECK-LABEL: define{{.*}} void @cast_int_matrix_to_short(<25 x i32> noundef %i, <25 x i16> noundef %s)
-  // CHECK:       [[I:%.*]] = load <25 x i32>, <25 x i32>* {{.*}}, align 4
+  // CHECK:       [[I:%.*]] = load <25 x i32>, ptr {{.*}}, align 4
   // CHECK-NEXT:  [[CONV:%.*]] = trunc <25 x i32> [[I]] to <25 x i16>
-  // CHECK-NEXT:  store <25 x i16> [[CONV]], <25 x i16>* {{.*}}, align 2
+  // CHECK-NEXT:  store <25 x i16> [[CONV]], ptr {{.*}}, align 2
   // CHECK-NEXT:  ret void
 
   s = (sx5x5)i;
@@ -51,9 +51,9 @@ void cast_int_matrix_to_short(ix5x5 i, sx5x5 s) {
 
 void cast_int_matrix_to_float(ix5x5 i, fx5x5 f) {
   // CHECK-LABEL: define{{.*}} void @cast_int_matrix_to_float(<25 x i32> noundef %i, <25 x float> noundef %f)
-  // CHECK:       [[I:%.*]] = load <25 x i32>, <25 x i32>* {{.*}}, align 4
+  // CHECK:       [[I:%.*]] = load <25 x i32>, ptr {{.*}}, align 4
   // CHECK-NEXT:  [[CONV:%.*]] = sitofp <25 x i32> [[I]] to <25 x float>
-  // CHECK-NEXT:  store <25 x float> [[CONV]], <25 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <25 x float> [[CONV]], ptr {{.*}}, align 4
   // CHECK-NEXT:  ret void
 
   f = (fx5x5)i;
@@ -61,9 +61,9 @@ void cast_int_matrix_to_float(ix5x5 i, fx5x5 f) {
 
 void cast_unsigned_int_matrix_to_float(unsigned_short_int_5x5 u, fx5x5 f) {
   // CHECK-LABEL: define{{.*}} void @cast_unsigned_int_matrix_to_float(<25 x i16> noundef %u, <25 x float> noundef %f)
-  // CHECK:       [[U:%.*]] = load <25 x i16>, <25 x i16>* {{.*}}, align 2
+  // CHECK:       [[U:%.*]] = load <25 x i16>, ptr {{.*}}, align 2
   // CHECK-NEXT:  [[CONV:%.*]] = uitofp <25 x i16> [[U]] to <25 x float>
-  // CHECK-NEXT:  store <25 x float> [[CONV]], <25 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <25 x float> [[CONV]], ptr {{.*}}, align 4
   // CHECK-NEXT:  ret void
 
   f = (fx5x5)u;
@@ -71,9 +71,9 @@ void cast_unsigned_int_matrix_to_float(unsigned_short_int_5x5 u, fx5x5 f) {
 
 void cast_double_matrix_to_int(dx5x5 d, ix5x5 i) {
   // CHECK-LABEL: define{{.*}} void @cast_double_matrix_to_int(<25 x double> noundef %d, <25 x i32> noundef %i)
-  // CHECK:       [[D:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8
+  // CHECK:       [[D:%.*]] = load <25 x double>, ptr {{.*}}, align 8
   // CHECK-NEXT:  [[CONV:%.*]] = fptosi <25 x double> [[D]] to <25 x i32>
-  // CHECK-NEXT:  store <25 x i32> [[CONV]], <25 x i32>* {{.*}}, align 4
+  // CHECK-NEXT:  store <25 x i32> [[CONV]], ptr {{.*}}, align 4
   // CHECK-NEXT:  ret void
 
   i = (ix5x5)d;
@@ -81,9 +81,9 @@ void cast_double_matrix_to_int(dx5x5 d, ix5x5 i) {
 
 void cast_float_matrix_to_unsigned_short_int(fx5x5 f, unsigned_short_int_5x5 i) {
   // CHECK-LABEL: define{{.*}} void @cast_float_matrix_to_unsigned_short_int(<25 x float> noundef %f, <25 x i16> noundef %i)
-  // CHECK:       [[F:%.*]] = load <25 x float>, <25 x float>* {{.*}}, align 4
+  // CHECK:       [[F:%.*]] = load <25 x float>, ptr {{.*}}, align 4
   // CHECK-NEXT:  [[CONV:%.*]] = fptoui <25 x float> [[F]] to <25 x i16>
-  // CHECK-NEXT:  store <25 x i16> [[CONV]], <25 x i16>* %1, align 2
+  // CHECK-NEXT:  store <25 x i16> [[CONV]], ptr %i.addr, align 2
   // CHECK-NEXT:  ret void
 
   i = (unsigned_short_int_5x5)f;
@@ -91,9 +91,9 @@ void cast_float_matrix_to_unsigned_short_int(fx5x5 f, unsigned_short_int_5x5 i)
 
 void cast_double_matrix_to_float(dx5x5 d, fx5x5 f) {
   // CHECK-LABEL: define{{.*}} void @cast_double_matrix_to_float(<25 x double> noundef %d, <25 x float> noundef %f)
-  // CHECK:       [[D:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8
+  // CHECK:       [[D:%.*]] = load <25 x double>, ptr {{.*}}, align 8
   // CHECK-NEXT:  [[CONV:%.*]] = fptrunc <25 x double> [[D]] to <25 x float>
-  // CHECK-NEXT:  store <25 x float> [[CONV]], <25 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <25 x float> [[CONV]], ptr {{.*}}, align 4
   // CHECK-NEXT:  ret void
 
   f = (fx5x5)d;
@@ -101,9 +101,9 @@ void cast_double_matrix_to_float(dx5x5 d, fx5x5 f) {
 
 void cast_unsigned_short_int_to_unsigned_int(unsigned_short_int_5x5 s, unsigned_int_5x5 i) {
   // CHECK-LABEL: define{{.*}} void @cast_unsigned_short_int_to_unsigned_int(<25 x i16> noundef %s, <25 x i32> noundef %i)
-  // CHECK:       [[S:%.*]] = load <25 x i16>, <25 x i16>* {{.*}}, align 2
+  // CHECK:       [[S:%.*]] = load <25 x i16>, ptr {{.*}}, align 2
   // CHECK-NEXT:  [[CONV:%.*]] = zext <25 x i16> [[S]] to <25 x i32>
-  // CHECK-NEXT:  store <25 x i32> [[CONV]], <25 x i32>* {{.*}}, align 4
+  // CHECK-NEXT:  store <25 x i32> [[CONV]], ptr {{.*}}, align 4
   // CHECK-NEXT:  ret void
 
   i = (unsigned_int_5x5)s;
@@ -111,9 +111,9 @@ void cast_unsigned_short_int_to_unsigned_int(unsigned_short_int_5x5 s, unsigned_
 
 void cast_unsigned_long_int_to_unsigned_short_int(unsigned_long_int_5x5 l, unsigned_short_int_5x5 s) {
   // CHECK-LABEL: define{{.*}} void @cast_unsigned_long_int_to_unsigned_short_int(<25 x i64> noundef %l, <25 x i16> noundef %s)
-  // CHECK:       [[L:%.*]] = load <25 x i64>, <25 x i64>* %0, align 8
+  // CHECK:       [[L:%.*]] = load <25 x i64>, ptr %l.addr, align 8
   // CHECK-NEXT:  [[CONV:%.*]] = trunc <25 x i64> [[L]] to <25 x i16>
-  // CHECK-NEXT:  store <25 x i16> [[CONV]], <25 x i16>* {{.*}}, align 2
+  // CHECK-NEXT:  store <25 x i16> [[CONV]], ptr {{.*}}, align 2
   // CHECK-NEXT:  ret void
 
   s = (unsigned_short_int_5x5)l;
@@ -121,9 +121,9 @@ void cast_unsigned_long_int_to_unsigned_short_int(unsigned_long_int_5x5 l, unsig
 
 void cast_unsigned_short_int_to_int(unsigned_short_int_5x5 u, ix5x5 i) {
   // CHECK-LABEL: define{{.*}} void @cast_unsigned_short_int_to_int(<25 x i16> noundef %u, <25 x i32> noundef %i)
-  // CHECK:       [[U:%.*]] = load <25 x i16>, <25 x i16>* %0, align 2
+  // CHECK:       [[U:%.*]] = load <25 x i16>, ptr %u.addr, align 2
   // CHECK-NEXT:  [[CONV:%.*]] = zext <25 x i16> [[U]] to <25 x i32>
-  // CHECK-NEXT:  store <25 x i32> [[CONV]], <25 x i32>* {{.*}}, align 4
+  // CHECK-NEXT:  store <25 x i32> [[CONV]], ptr {{.*}}, align 4
   // CHECK-NEXT:  ret void
 
   i = (ix5x5)u;
@@ -131,9 +131,9 @@ void cast_unsigned_short_int_to_int(unsigned_short_int_5x5 u, ix5x5 i) {
 
 void cast_int_to_unsigned_long_int(ix5x5 i, unsigned_long_int_5x5 u) {
   // CHECK-LABEL: define{{.*}} void @cast_int_to_unsigned_long_int(<25 x i32> noundef %i, <25 x i64> noundef %u)
-  // CHECK:       [[I:%.*]] = load <25 x i32>, <25 x i32>* %0, align 4
+  // CHECK:       [[I:%.*]] = load <25 x i32>, ptr %i.addr, align 4
   // CHECK-NEXT:  [[CONV:%.*]] = sext <25 x i32> [[I]] to <25 x i64>
-  // CHECK-NEXT:  store <25 x i64> [[CONV]], <25 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:  store <25 x i64> [[CONV]], ptr {{.*}}, align 8
   // CHECK-NEXT:  ret void
 
   u = (unsigned_long_int_5x5)i;

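For context, the mechanical change in each of these tests is the same: with
opaque pointers, LLVM IR no longer spells out pointee types, so pointer types
like "<25 x double>*" in the CHECK lines become plain "ptr", and hardcoded
unnamed values such as "%0" (previously created by the pointer bitcasts that
opaque pointers make unnecessary) are replaced by the named allocas they now
resolve to, e.g. "%b.addr". A minimal before/after sketch, illustrative only
and not part of this commit (%p, %q, %v are made-up names):

  ; typed pointers (before the conversion)
  %q = bitcast i8* %p to i32*        ; pointee casts had to be explicit
  %v = load i32, i32* %q, align 4    ; pointer operand repeats the pointee type
  store i32 %v, i32* %q, align 4

  ; opaque pointers (after the conversion)
  %v = load i32, ptr %p, align 4     ; a single pointer type: ptr
  store i32 %v, ptr %p, align 4      ; the bitcast disappears entirely

Because the bitcasts vanish, instruction numbering shifts, which is why the
updated checks below prefer named values over numbered ones.
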
diff --git a/clang/test/CodeGen/matrix-type-operators.c b/clang/test/CodeGen/matrix-type-operators.c
index 62a1838ed3a56..c588685851548 100644
--- a/clang/test/CodeGen/matrix-type-operators.c
+++ b/clang/test/CodeGen/matrix-type-operators.c
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -no-opaque-pointers -O0 -fenable-matrix -triple x86_64-apple-darwin %s -emit-llvm -disable-llvm-passes -o - | FileCheck --check-prefixes=CHECK,NOOPT %s
-// RUN: %clang_cc1 -no-opaque-pointers -O1 -fenable-matrix -triple x86_64-apple-darwin %s -emit-llvm -disable-llvm-passes -o - | FileCheck --check-prefixes=CHECK,OPT %s
+// RUN: %clang_cc1 -O0 -fenable-matrix -triple x86_64-apple-darwin %s -emit-llvm -disable-llvm-passes -o - | FileCheck --check-prefixes=CHECK,NOOPT %s
+// RUN: %clang_cc1 -O1 -fenable-matrix -triple x86_64-apple-darwin %s -emit-llvm -disable-llvm-passes -o - | FileCheck --check-prefixes=CHECK,OPT %s
 
 
 typedef double dx5x5_t __attribute__((matrix_type(5, 5)));
@@ -11,241 +11,241 @@ typedef unsigned long long ullx4x2_t __attribute__((matrix_type(4, 2)));
 
 void add_matrix_matrix_double(dx5x5_t a, dx5x5_t b, dx5x5_t c) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_matrix_double(<25 x double> noundef %a, <25 x double> noundef %b, <25 x double> noundef %c)
-  // NOOPT:       [[B:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // NOOPT-NEXT:  [[C:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // OPT:         [[B:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[C:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // NOOPT-NEXT:  [[C:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // OPT:         [[B:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[C:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = fadd <25 x double> [[B]], [[C]]
-  // CHECK-NEXT:  store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+  // CHECK-NEXT:  store <25 x double> [[RES]], ptr {{.*}}, align 8
 
   a = b + c;
 }
 
 void add_compound_assign_matrix_double(dx5x5_t a, dx5x5_t b) {
   // CHECK-LABEL: define{{.*}} void @add_compound_assign_matrix_double(<25 x double> noundef %a, <25 x double> noundef %b)
-  // NOOPT:       [[B:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // NOOPT-NEXT:  [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // OPT:         [[B:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // NOOPT-NEXT:  [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // OPT:         [[B:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = fadd <25 x double> [[A]], [[B]]
-  // CHECK-NEXT:  store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+  // CHECK-NEXT:  store <25 x double> [[RES]], ptr {{.*}}, align 8
 
   a += b;
 }
 
 void subtract_compound_assign_matrix_double(dx5x5_t a, dx5x5_t b) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_assign_matrix_double(<25 x double> noundef %a, <25 x double> noundef %b)
-  // NOOPT:       [[B:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // NOOPT-NEXT:  [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // OPT:         [[B:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // NOOPT-NEXT:  [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // OPT:         [[B:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = fsub <25 x double> [[A]], [[B]]
-  // CHECK-NEXT:  store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+  // CHECK-NEXT:  store <25 x double> [[RES]], ptr {{.*}}, align 8
 
   a -= b;
 }
 
 void add_matrix_matrix_float(fx2x3_t a, fx2x3_t b, fx2x3_t c) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_matrix_float(<6 x float> noundef %a, <6 x float> noundef %b, <6 x float> noundef %c)
-  // NOOPT:       [[B:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4{{$}}
-  // NOOPT-NEXT:  [[C:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4{{$}}
-  // OPT:         [[B:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[C:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <6 x float>, ptr {{.*}}, align 4{{$}}
+  // NOOPT-NEXT:  [[C:%.*]] = load <6 x float>, ptr {{.*}}, align 4{{$}}
+  // OPT:         [[B:%.*]] = load <6 x float>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[C:%.*]] = load <6 x float>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = fadd <6 x float> [[B]], [[C]]
-  // CHECK-NEXT:  store <6 x float> [[RES]], <6 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <6 x float> [[RES]], ptr {{.*}}, align 4
 
   a = b + c;
 }
 
 void add_compound_assign_matrix_float(fx2x3_t a, fx2x3_t b) {
   // CHECK-LABEL: define{{.*}} void @add_compound_assign_matrix_float(<6 x float> noundef %a, <6 x float> noundef %b)
-  // NOOPT:       [[B:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4{{$}}
-  // NOOPT-NEXT:  [[A:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4{{$}}
-  // OPT:         [[B:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[A:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <6 x float>, ptr {{.*}}, align 4{{$}}
+  // NOOPT-NEXT:  [[A:%.*]] = load <6 x float>, ptr {{.*}}, align 4{{$}}
+  // OPT:         [[B:%.*]] = load <6 x float>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[A:%.*]] = load <6 x float>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = fadd <6 x float> [[A]], [[B]]
-  // CHECK-NEXT:  store <6 x float> [[RES]], <6 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <6 x float> [[RES]], ptr {{.*}}, align 4
 
   a += b;
 }
 
 void subtract_compound_assign_matrix_float(fx2x3_t a, fx2x3_t b) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_assign_matrix_float(<6 x float> noundef %a, <6 x float> noundef %b)
-  // NOOPT:       [[B:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4{{$}}
-  // NOOPT-NEXT:  [[A:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4{{$}}
-  // OPT:         [[B:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[A:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <6 x float>, ptr {{.*}}, align 4{{$}}
+  // NOOPT-NEXT:  [[A:%.*]] = load <6 x float>, ptr {{.*}}, align 4{{$}}
+  // OPT:         [[B:%.*]] = load <6 x float>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[A:%.*]] = load <6 x float>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = fsub <6 x float> [[A]], [[B]]
-  // CHECK-NEXT:  store <6 x float> [[RES]], <6 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <6 x float> [[RES]], ptr {{.*}}, align 4
 
   a -= b;
 }
 
 void add_matrix_scalar_double_float(dx5x5_t a, float vf) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_scalar_double_float(<25 x double> noundef %a, float noundef %vf)
-  // NOOPT:       [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // NOOPT-NEXT:  [[SCALAR:%.*]] = load float, float* %vf.addr, align 4{{$}}
-  // OPT:         [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[SCALAR:%.*]] = load float, float* %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // NOOPT-NEXT:  [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4{{$}}
+  // OPT:         [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = fpext float [[SCALAR]] to double
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <25 x double> poison, double [[SCALAR_EXT]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <25 x double> [[SCALAR_EMBED]], <25 x double> poison, <25 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fadd <25 x double> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+  // CHECK-NEXT:  store <25 x double> [[RES]], ptr {{.*}}, align 8
 
   a = a + vf;
 }
 
 void add_compound_matrix_scalar_double_float(dx5x5_t a, float vf) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_scalar_double_float(<25 x double> noundef %a, float noundef %vf)
-  // NOOPT:  [[SCALAR:%.*]] = load float, float* %vf.addr, align 4{{$}}
-  // OPT:    [[SCALAR:%.*]] = load float, float* %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:  [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4{{$}}
+  // OPT:    [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = fpext float [[SCALAR]] to double
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <25 x double> poison, double [[SCALAR_EXT]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <25 x double> [[SCALAR_EMBED]], <25 x double> poison, <25 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fadd <25 x double> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+  // CHECK-NEXT:  store <25 x double> [[RES]], ptr {{.*}}, align 8
 
   a += vf;
 }
 
 void subtract_compound_matrix_scalar_double_float(dx5x5_t a, float vf) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_scalar_double_float(<25 x double> noundef %a, float noundef %vf)
-  // NOOPT:  [[SCALAR:%.*]] = load float, float* %vf.addr, align 4{{$}}
-  // OPT:    [[SCALAR:%.*]] = load float, float* %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:  [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4{{$}}
+  // OPT:    [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = fpext float [[SCALAR]] to double
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <25 x double> poison, double [[SCALAR_EXT]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <25 x double> [[SCALAR_EMBED]], <25 x double> poison, <25 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fsub <25 x double> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+  // CHECK-NEXT:  store <25 x double> [[RES]], ptr {{.*}}, align 8
 
   a -= vf;
 }
 
 void add_matrix_scalar_double_double(dx5x5_t a, double vd) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_scalar_double_double(<25 x double> noundef %a, double noundef %vd)
-  // NOOPT:       [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // NOOPT-NEXT:  [[SCALAR:%.*]] = load double, double* %vd.addr, align 8{{$}}
-  // OPT:         [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[SCALAR:%.*]] = load double, double* %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // NOOPT-NEXT:  [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8{{$}}
+  // OPT:         [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <25 x double> poison, double [[SCALAR]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <25 x double> [[SCALAR_EMBED]], <25 x double> poison, <25 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fadd <25 x double> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+  // CHECK-NEXT:  store <25 x double> [[RES]], ptr {{.*}}, align 8
 
   a = a + vd;
 }
 
 void add_compound_matrix_scalar_double_double(dx5x5_t a, double vd) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_scalar_double_double(<25 x double> noundef %a, double noundef %vd)
-  // NOOPT:       [[SCALAR:%.*]] = load double, double* %vd.addr, align 8{{$}}
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // OPT:         [[SCALAR:%.*]] = load double, double* %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // OPT:         [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <25 x double> poison, double [[SCALAR]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <25 x double> [[SCALAR_EMBED]], <25 x double> poison, <25 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fadd <25 x double> [[MATRIX]], [[SCALAR_EMBED1]]
-  // store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+  // store <25 x double> [[RES]], ptr {{.*}}, align 8
   a += vd;
 }
 
 void subtract_compound_matrix_scalar_double_double(dx5x5_t a, double vd) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_scalar_double_double(<25 x double> noundef %a, double noundef %vd)
-  // NOOPT:       [[SCALAR:%.*]] = load double, double* %vd.addr, align 8{{$}}
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // OPT:         [[SCALAR:%.*]] = load double, double* %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // OPT:         [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <25 x double> poison, double [[SCALAR]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <25 x double> [[SCALAR_EMBED]], <25 x double> poison, <25 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fsub <25 x double> [[MATRIX]], [[SCALAR_EMBED1]]
-  // store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+  // store <25 x double> [[RES]], ptr {{.*}}, align 8
   a -= vd;
 }
 
 void add_matrix_scalar_float_float(fx2x3_t b, float vf) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_scalar_float_float(<6 x float> noundef %b, float noundef %vf)
-  // NOOPT:       [[MATRIX:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4{{$}}
-  // NOOPT-NEXT:  [[SCALAR:%.*]] = load float, float* %vf.addr, align 4{{$}}
-  // OPT:         [[MATRIX:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[SCALAR:%.*]] = load float, float* %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[MATRIX:%.*]] = load <6 x float>, ptr {{.*}}, align 4{{$}}
+  // NOOPT-NEXT:  [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4{{$}}
+  // OPT:         [[MATRIX:%.*]] = load <6 x float>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <6 x float> poison, float [[SCALAR]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <6 x float> [[SCALAR_EMBED]], <6 x float> poison, <6 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fadd <6 x float> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <6 x float> [[RES]], <6 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <6 x float> [[RES]], ptr {{.*}}, align 4
 
   b = b + vf;
 }
 
 void add_compound_matrix_scalar_float_float(fx2x3_t b, float vf) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_scalar_float_float(<6 x float> noundef %b, float noundef %vf)
-  // NOOPT:       [[SCALAR:%.*]] = load float, float* %vf.addr, align 4{{$}}
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <6 x float>, <6 x float>* %0, align 4{{$}}
-  // OPT:         [[SCALAR:%.*]] = load float, float* %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <6 x float>, <6 x float>* %0, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <6 x float>, ptr %b.addr, align 4{{$}}
+  // OPT:         [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <6 x float>, ptr %b.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <6 x float> poison, float [[SCALAR]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <6 x float> [[SCALAR_EMBED]], <6 x float> poison, <6 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fadd <6 x float> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <6 x float> [[RES]], <6 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <6 x float> [[RES]], ptr {{.*}}, align 4
   b += vf;
 }
 
 void subtract_compound_matrix_scalar_float_float(fx2x3_t b, float vf) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_scalar_float_float(<6 x float> noundef %b, float noundef %vf)
-  // NOOPT:       [[SCALAR:%.*]] = load float, float* %vf.addr, align 4{{$}}
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <6 x float>, <6 x float>* %0, align 4{{$}}
-  // OPT:         [[SCALAR:%.*]] = load float, float* %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <6 x float>, <6 x float>* %0, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <6 x float>, ptr %b.addr, align 4{{$}}
+  // OPT:         [[SCALAR:%.*]] = load float, ptr %vf.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <6 x float>, ptr %b.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <6 x float> poison, float [[SCALAR]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <6 x float> [[SCALAR_EMBED]], <6 x float> poison, <6 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fsub <6 x float> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <6 x float> [[RES]], <6 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <6 x float> [[RES]], ptr {{.*}}, align 4
   b -= vf;
 }
 
 void add_matrix_scalar_float_double(fx2x3_t b, double vd) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_scalar_float_double(<6 x float> noundef %b, double noundef %vd)
-  // NOOPT:       [[MATRIX:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4{{$}}
-  // NOOPT-NEXT:  [[SCALAR:%.*]] = load double, double* %vd.addr, align 8{{$}}
-  // OPT:         [[MATRIX:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[SCALAR:%.*]] = load double, double* %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[MATRIX:%.*]] = load <6 x float>, ptr {{.*}}, align 4{{$}}
+  // NOOPT-NEXT:  [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8{{$}}
+  // OPT:         [[MATRIX:%.*]] = load <6 x float>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_TRUNC:%.*]] = fptrunc double [[SCALAR]] to float
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <6 x float> poison, float [[SCALAR_TRUNC]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <6 x float> [[SCALAR_EMBED]], <6 x float> poison, <6 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fadd <6 x float> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <6 x float> [[RES]], <6 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <6 x float> [[RES]], ptr {{.*}}, align 4
 
   b = b + vd;
 }
 
 void add_compound_matrix_scalar_float_double(fx2x3_t b, double vd) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_scalar_float_double(<6 x float> noundef %b, double noundef %vd)
-  // NOOPT:       [[SCALAR:%.*]] = load double, double* %vd.addr, align 8{{$}}
-  // OPT:         [[SCALAR:%.*]] = load double, double* %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8{{$}}
+  // OPT:         [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_TRUNC:%.*]] = fptrunc double [[SCALAR]] to float
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <6 x float>, ptr {{.*}}, align 4{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <6 x float>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <6 x float> poison, float [[SCALAR_TRUNC]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <6 x float> [[SCALAR_EMBED]], <6 x float> poison, <6 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fadd <6 x float> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <6 x float> [[RES]], <6 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <6 x float> [[RES]], ptr {{.*}}, align 4
   b += vd;
 }
 
 void subtract_compound_matrix_scalar_float_double(fx2x3_t b, double vd) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_scalar_float_double(<6 x float> noundef %b, double noundef %vd)
-  // NOOPT:       [[SCALAR:%.*]] = load double, double* %vd.addr, align 8{{$}}
-  // OPT:         [[SCALAR:%.*]] = load double, double* %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8{{$}}
+  // OPT:         [[SCALAR:%.*]] = load double, ptr %vd.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_TRUNC:%.*]] = fptrunc double [[SCALAR]] to float
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <6 x float>, <6 x float>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <6 x float>, ptr {{.*}}, align 4{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <6 x float>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <6 x float> poison, float [[SCALAR_TRUNC]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <6 x float> [[SCALAR_EMBED]], <6 x float> poison, <6 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = fsub <6 x float> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <6 x float> [[RES]], <6 x float>* {{.*}}, align 4
+  // CHECK-NEXT:  store <6 x float> [[RES]], ptr {{.*}}, align 4
   b -= vd;
 }
 
@@ -253,332 +253,332 @@ void subtract_compound_matrix_scalar_float_double(fx2x3_t b, double vd) {
 
 void add_matrix_matrix_int(ix9x3_t a, ix9x3_t b, ix9x3_t c) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_matrix_int(<27 x i32> noundef %a, <27 x i32> noundef %b, <27 x i32> noundef %c)
-  // NOOPT:       [[B:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4{{$}}
-  // NOOPT-NEXT:  [[C:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4{{$}}
-  // OPT:         [[B:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[C:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <27 x i32>, ptr {{.*}}, align 4{{$}}
+  // NOOPT-NEXT:  [[C:%.*]] = load <27 x i32>, ptr {{.*}}, align 4{{$}}
+  // OPT:         [[B:%.*]] = load <27 x i32>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[C:%.*]] = load <27 x i32>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = add <27 x i32> [[B]], [[C]]
-  // CHECK-NEXT:  store <27 x i32> [[RES]], <27 x i32>* {{.*}}, align 4
+  // CHECK-NEXT:  store <27 x i32> [[RES]], ptr {{.*}}, align 4
   a = b + c;
 }
 
 void add_compound_matrix_matrix_int(ix9x3_t a, ix9x3_t b) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_matrix_int(<27 x i32> noundef %a, <27 x i32> noundef %b)
-  // NOOPT:       [[B:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4{{$}}
-  // NOOPT-NEXT:  [[A:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4{{$}}
-  // OPT:         [[B:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[A:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <27 x i32>, ptr {{.*}}, align 4{{$}}
+  // NOOPT-NEXT:  [[A:%.*]] = load <27 x i32>, ptr {{.*}}, align 4{{$}}
+  // OPT:         [[B:%.*]] = load <27 x i32>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[A:%.*]] = load <27 x i32>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = add <27 x i32> [[A]], [[B]]
-  // CHECK-NEXT:  store <27 x i32> [[RES]], <27 x i32>* {{.*}}, align 4
+  // CHECK-NEXT:  store <27 x i32> [[RES]], ptr {{.*}}, align 4
   a += b;
 }
 
 void subtract_compound_matrix_matrix_int(ix9x3_t a, ix9x3_t b) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_matrix_int(<27 x i32> noundef %a, <27 x i32> noundef %b)
-  // NOOPT:       [[B:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4{{$}}
-  // NOOPT-NEXT:  [[A:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4{{$}}
-  // OPT:         [[B:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[A:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <27 x i32>, ptr {{.*}}, align 4{{$}}
+  // NOOPT-NEXT:  [[A:%.*]] = load <27 x i32>, ptr {{.*}}, align 4{{$}}
+  // OPT:         [[B:%.*]] = load <27 x i32>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[A:%.*]] = load <27 x i32>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = sub <27 x i32> [[A]], [[B]]
-  // CHECK-NEXT:  store <27 x i32> [[RES]], <27 x i32>* {{.*}}, align 4
+  // CHECK-NEXT:  store <27 x i32> [[RES]], ptr {{.*}}, align 4
   a -= b;
 }
 
 void add_matrix_matrix_unsigned_long_long(ullx4x2_t a, ullx4x2_t b, ullx4x2_t c) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_matrix_unsigned_long_long(<8 x i64> noundef %a, <8 x i64> noundef %b, <8 x i64> noundef %c)
-  // NOOPT:       [[B:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8{{$}}
-  // NOOPT-NEXT:  [[C:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8{{$}}
-  // OPT:         [[B:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[C:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <8 x i64>, ptr {{.*}}, align 8{{$}}
+  // NOOPT-NEXT:  [[C:%.*]] = load <8 x i64>, ptr {{.*}}, align 8{{$}}
+  // OPT:         [[B:%.*]] = load <8 x i64>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[C:%.*]] = load <8 x i64>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = add <8 x i64> [[B]], [[C]]
-  // CHECK-NEXT:  store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:  store <8 x i64> [[RES]], ptr {{.*}}, align 8
 
   a = b + c;
 }
 
 void add_compound_matrix_matrix_unsigned_long_long(ullx4x2_t a, ullx4x2_t b) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_matrix_unsigned_long_long(<8 x i64> noundef %a, <8 x i64> noundef %b)
-  // NOOPT:       [[B:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8{{$}}
-  // NOOPT-NEXT:  [[A:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8{{$}}
-  // OPT:         [[B:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:    [[A:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <8 x i64>, ptr {{.*}}, align 8{{$}}
+  // NOOPT-NEXT:  [[A:%.*]] = load <8 x i64>, ptr {{.*}}, align 8{{$}}
+  // OPT:         [[B:%.*]] = load <8 x i64>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[A:%.*]] = load <8 x i64>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = add <8 x i64> [[A]], [[B]]
-  // CHECK-NEXT:  store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:  store <8 x i64> [[RES]], ptr {{.*}}, align 8
 
   a += b;
 }
 
 void subtract_compound_matrix_matrix_unsigned_long_long(ullx4x2_t a, ullx4x2_t b) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_matrix_unsigned_long_long(<8 x i64> noundef %a, <8 x i64> noundef %b)
-  // NOOPT:       [[B:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8{{$}}
-  // OPT:         [[B:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // NOOPT-NEXT:  [[A:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8{{$}}
-  // OPT-NEXT:    [[A:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[B:%.*]] = load <8 x i64>, ptr {{.*}}, align 8{{$}}
+  // OPT:         [[B:%.*]] = load <8 x i64>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[A:%.*]] = load <8 x i64>, ptr {{.*}}, align 8{{$}}
+  // OPT-NEXT:    [[A:%.*]] = load <8 x i64>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[RES:%.*]] = sub <8 x i64> [[A]], [[B]]
-  // CHECK-NEXT:  store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:  store <8 x i64> [[RES]], ptr {{.*}}, align 8
 
   a -= b;
 }
 
 void add_matrix_scalar_int_short(ix9x3_t a, short vs) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_scalar_int_short(<27 x i32> noundef %a, i16 noundef signext %vs)
-  // NOOPT:        [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4{{$}}
-  // NOOPT-NEXT:   [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2{{$}}
-  // OPT:          [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:     [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:        [[MATRIX:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+  // NOOPT-NEXT:   [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2{{$}}
+  // OPT:          [[MATRIX:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:     [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_EXT:%.*]] = sext i16 [[SCALAR]] to i32
   // CHECK-NEXT:   [[SCALAR_EMBED:%.*]] = insertelement <27 x i32> poison, i32 [[SCALAR_EXT]], i64 0
   // CHECK-NEXT:   [[SCALAR_EMBED1:%.*]] = shufflevector <27 x i32> [[SCALAR_EMBED]], <27 x i32> poison, <27 x i32> zeroinitializer
   // CHECK-NEXT:   [[RES:%.*]] = add <27 x i32> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:   store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:   store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 
   a = a + vs;
 }
 
 void add_compound_matrix_scalar_int_short(ix9x3_t a, short vs) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_scalar_int_short(<27 x i32> noundef %a, i16 noundef signext %vs)
-  // NOOPT:       [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2{{$}}
-  // OPT:         [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2{{$}}
+  // OPT:         [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = sext i16 [[SCALAR]] to i32
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* %0, align 4{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* %0, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <27 x i32>, ptr %a.addr, align 4{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <27 x i32>, ptr %a.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <27 x i32> poison, i32 [[SCALAR_EXT:%.*]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <27 x i32> [[SCALAR_EMBED]], <27 x i32> poison, <27 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = add <27 x i32> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:  store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 
   a += vs;
 }
 
 void subtract_compound_matrix_scalar_int_short(ix9x3_t a, short vs) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_scalar_int_short(<27 x i32> noundef %a, i16 noundef signext %vs)
-  // NOOPT:       [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2{{$}}
-  // OPT:         [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2{{$}}
+  // OPT:         [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = sext i16 [[SCALAR]] to i32
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* %0, align 4{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* %0, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <27 x i32>, ptr %a.addr, align 4{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <27 x i32>, ptr %a.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <27 x i32> poison, i32 [[SCALAR_EXT:%.*]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <27 x i32> [[SCALAR_EMBED]], <27 x i32> poison, <27 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = sub <27 x i32> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:  store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 
   a -= vs;
 }
 
 void add_matrix_scalar_int_long_int(ix9x3_t a, long int vli) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_scalar_int_long_int(<27 x i32> noundef %a, i64 noundef %vli)
-  // NOOPT:        [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4{{$}}
-  // NOOPT-NEXT:   [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8{{$}}
-  // OPT:          [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:     [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:        [[MATRIX:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+  // NOOPT-NEXT:   [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8{{$}}
+  // OPT:          [[MATRIX:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:     [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_TRUNC:%.*]] = trunc i64 [[SCALAR]] to i32
   // CHECK-NEXT:   [[SCALAR_EMBED:%.*]] = insertelement <27 x i32> poison, i32 [[SCALAR_TRUNC]], i64 0
   // CHECK-NEXT:   [[SCALAR_EMBED1:%.*]] = shufflevector <27 x i32> [[SCALAR_EMBED]], <27 x i32> poison, <27 x i32> zeroinitializer
   // CHECK-NEXT:   [[RES:%.*]] = add <27 x i32> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:   store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:   store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 
   a = a + vli;
 }
 
 void add_compound_matrix_scalar_int_long_int(ix9x3_t a, long int vli) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_scalar_int_long_int(<27 x i32> noundef %a, i64 noundef %vli)
-  // NOOPT:       [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8{{$}}
-  // OPT:         [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:  [[SCALAR_TRUNC:%.*]] = trunc i64 %1 to i32
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* %0, align 4{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* %0, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8{{$}}
+  // OPT:         [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:  [[SCALAR_TRUNC:%.*]] = trunc i64 [[SCALAR]] to i32
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <27 x i32>, ptr %a.addr, align 4{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <27 x i32>, ptr %a.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <27 x i32> poison, i32 [[SCALAR_TRUNC]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <27 x i32> [[SCALAR_EMBED]], <27 x i32> poison, <27 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = add <27 x i32> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:  store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 
   a += vli;
 }
 
 void subtract_compound_matrix_scalar_int_long_int(ix9x3_t a, long int vli) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_scalar_int_long_int(<27 x i32> noundef %a, i64 noundef %vli)
-  // NOOPT:       [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8{{$}}
-  // OPT:         [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:  [[SCALAR_TRUNC:%.*]] = trunc i64 %1 to i32
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* %0, align 4{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* %0, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8{{$}}
+  // OPT:         [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:  [[SCALAR_TRUNC:%.*]] = trunc i64 [[SCALAR]] to i32
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <27 x i32>, ptr %a.addr, align 4{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <27 x i32>, ptr %a.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <27 x i32> poison, i32 [[SCALAR_TRUNC]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <27 x i32> [[SCALAR_EMBED]], <27 x i32> poison, <27 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = sub <27 x i32> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:  store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 
   a -= vli;
 }
 
 void add_matrix_scalar_int_unsigned_long_long(ix9x3_t a, unsigned long long int vulli) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_scalar_int_unsigned_long_long(<27 x i32> noundef %a, i64 noundef %vulli)
-  // NOOPT:        [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4{{$}}
-  // NOOPT-NEXT:   [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8{{$}}
-  // OPT:          [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:     [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:        [[MATRIX:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+  // NOOPT-NEXT:   [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8{{$}}
+  // OPT:          [[MATRIX:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:     [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_TRUNC:%.*]] = trunc i64 [[SCALAR]] to i32
   // CHECK-NEXT:   [[SCALAR_EMBED:%.*]] = insertelement <27 x i32> poison, i32 [[SCALAR_TRUNC]], i64 0
   // CHECK-NEXT:   [[SCALAR_EMBED1:%.*]] = shufflevector <27 x i32> [[SCALAR_EMBED]], <27 x i32> poison, <27 x i32> zeroinitializer
   // CHECK-NEXT:   [[RES:%.*]] = add <27 x i32> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:   store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:   store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 
   a = a + vulli;
 }
 
 void add_compound_matrix_scalar_int_unsigned_long_long(ix9x3_t a, unsigned long long int vulli) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_scalar_int_unsigned_long_long(<27 x i32> noundef %a, i64 noundef %vulli)
-  // NOOPT:        [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8{{$}}
-  // OPT:          [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:        [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8{{$}}
+  // OPT:          [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_TRUNC:%.*]] = trunc i64 [[SCALAR]] to i32
-  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MATRIX_ADDR:%.*]], align 4{{$}}
-  // OPT-NEXT:     [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MATRIX_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <27 x i32>, ptr [[MATRIX_ADDR:%.*]], align 4{{$}}
+  // OPT-NEXT:     [[MATRIX:%.*]] = load <27 x i32>, ptr [[MATRIX_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_EMBED:%.*]] = insertelement <27 x i32> poison, i32 [[SCALAR_TRUNC]], i64 0
   // CHECK-NEXT:   [[SCALAR_EMBED1:%.*]] = shufflevector <27 x i32> [[SCALAR_EMBED]], <27 x i32> poison, <27 x i32> zeroinitializer
   // CHECK-NEXT:   [[RES:%.*]] = add <27 x i32> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:   store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:   store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 
   a += vulli;
 }
 
 void subtract_compound_matrix_scalar_int_unsigned_long_long(ix9x3_t a, unsigned long long int vulli) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_scalar_int_unsigned_long_long(<27 x i32> noundef %a, i64 noundef %vulli)
-  // NOOPT:        [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8{{$}}
-  // OPT:          [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:        [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8{{$}}
+  // OPT:          [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_TRUNC:%.*]] = trunc i64 [[SCALAR]] to i32
-  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MATRIX_ADDR:%.*]], align 4{{$}}
-  // OPT-NEXT:     [[MATRIX:%.*]] = load <27 x i32>, <27 x i32>* [[MATRIX_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <27 x i32>, ptr [[MATRIX_ADDR:%.*]], align 4{{$}}
+  // OPT-NEXT:     [[MATRIX:%.*]] = load <27 x i32>, ptr [[MATRIX_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_EMBED:%.*]] = insertelement <27 x i32> poison, i32 [[SCALAR_TRUNC]], i64 0
   // CHECK-NEXT:   [[SCALAR_EMBED1:%.*]] = shufflevector <27 x i32> [[SCALAR_EMBED]], <27 x i32> poison, <27 x i32> zeroinitializer
   // CHECK-NEXT:   [[RES:%.*]] = sub <27 x i32> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:   store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:   store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 
   a -= vulli;
 }
 
 void add_matrix_scalar_long_long_int_short(ullx4x2_t b, short vs) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_scalar_long_long_int_short(<8 x i64> noundef %b, i16 noundef signext %vs)
-  // NOOPT:         [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2{{$}}
-  // OPT:           [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2{{$}}
+  // OPT:           [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[SCALAR_EXT:%.*]] = sext i16 [[SCALAR]] to i64
-  // NOOPT-NEXT:    [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8{{$}}
-  // OPT-NEXT:      [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:    [[MATRIX:%.*]] = load <8 x i64>, ptr {{.*}}, align 8{{$}}
+  // OPT-NEXT:      [[MATRIX:%.*]] = load <8 x i64>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[SCALAR_EMBED:%.*]] = insertelement <8 x i64> poison, i64 [[SCALAR_EXT]], i64 0
   // CHECK-NEXT:    [[SCALAR_EMBED1:%.*]] = shufflevector <8 x i64> [[SCALAR_EMBED]], <8 x i64> poison, <8 x i32> zeroinitializer
   // CHECK-NEXT:    [[RES:%.*]] = add <8 x i64> [[SCALAR_EMBED1]], [[MATRIX]]
-  // CHECK-NEXT:    store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:    store <8 x i64> [[RES]], ptr {{.*}}, align 8
 
   b = vs + b;
 }
 
 void add_compound_matrix_scalar_long_long_int_short(ullx4x2_t b, short vs) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_scalar_long_long_int_short(<8 x i64> noundef %b, i16 noundef signext %vs)
-  // NOOPT:       [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2{{$}}
-  // OPT:         [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2{{$}}
+  // OPT:         [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = sext i16 [[SCALAR]] to i64
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* %0, align 8{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* %0, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <8 x i64>, ptr %b.addr, align 8{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <8 x i64>, ptr %b.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <8 x i64> poison, i64 [[SCALAR_EXT]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <8 x i64> [[SCALAR_EMBED]], <8 x i64> poison, <8 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = add <8 x i64> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:  store <8 x i64> [[RES]], ptr {{.*}}, align 8
 
   b += vs;
 }
 
 void subtract_compound_matrix_scalar_long_long_int_short(ullx4x2_t b, short vs) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_scalar_long_long_int_short(<8 x i64> noundef %b, i16 noundef signext %vs)
-  // NOOPT:       [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2{{$}}
-  // OPT:         [[SCALAR:%.*]] = load i16, i16* %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2{{$}}
+  // OPT:         [[SCALAR:%.*]] = load i16, ptr %vs.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EXT:%.*]] = sext i16 [[SCALAR]] to i64
-  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* %0, align 8{{$}}
-  // OPT-NEXT:    [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* %0, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[MATRIX:%.*]] = load <8 x i64>, ptr %b.addr, align 8{{$}}
+  // OPT-NEXT:    [[MATRIX:%.*]] = load <8 x i64>, ptr %b.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[SCALAR_EMBED:%.*]] = insertelement <8 x i64> poison, i64 [[SCALAR_EXT]], i64 0
   // CHECK-NEXT:  [[SCALAR_EMBED1:%.*]] = shufflevector <8 x i64> [[SCALAR_EMBED]], <8 x i64> poison, <8 x i32> zeroinitializer
   // CHECK-NEXT:  [[RES:%.*]] = sub <8 x i64> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:  store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:  store <8 x i64> [[RES]], ptr {{.*}}, align 8
 
   b -= vs;
 }
 
 void add_matrix_scalar_long_long_int_int(ullx4x2_t b, long int vli) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_scalar_long_long_int_int(<8 x i64> noundef %b, i64 noundef %vli)
-  // NOOPT:         [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8{{$}}
-  // NOOPT-NEXT:    [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8{{$}}
-  // OPT:           [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:      [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8{{$}}
+  // NOOPT-NEXT:    [[MATRIX:%.*]] = load <8 x i64>, ptr {{.*}}, align 8{{$}}
+  // OPT:           [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:      [[MATRIX:%.*]] = load <8 x i64>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[SCALAR_EMBED:%.*]] = insertelement <8 x i64> poison, i64 [[SCALAR]], i64 0
   // CHECK-NEXT:    [[SCALAR_EMBED1:%.*]] = shufflevector <8 x i64> [[SCALAR_EMBED]], <8 x i64> poison, <8 x i32> zeroinitializer
   // CHECK-NEXT:    [[RES:%.*]] = add <8 x i64> [[SCALAR_EMBED1]], [[MATRIX]]
-  // CHECK-NEXT:    store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:    store <8 x i64> [[RES]], ptr {{.*}}, align 8
 
   b = vli + b;
 }
 
 void add_compound_matrix_scalar_long_long_int_int(ullx4x2_t b, long int vli) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_scalar_long_long_int_int(<8 x i64> noundef %b, i64 noundef %vli)
-  // NOOPT:        [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8{{$}}
-  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8{{$}}
-  // OPT:          [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:     [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:        [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8{{$}}
+  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <8 x i64>, ptr {{.*}}, align 8{{$}}
+  // OPT:          [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:     [[MATRIX:%.*]] = load <8 x i64>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_EMBED:%.*]] = insertelement <8 x i64> poison, i64 [[SCALAR]], i64 0
   // CHECK-NEXT:   [[SCALAR_EMBED1:%.*]] = shufflevector <8 x i64> [[SCALAR_EMBED]], <8 x i64> poison, <8 x i32> zeroinitializer
   // CHECK-NEXT:   [[RES:%.*]] = add <8 x i64> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:   store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:   store <8 x i64> [[RES]], ptr {{.*}}, align 8
 
   b += vli;
 }
 
 void subtract_compound_matrix_scalar_long_long_int_int(ullx4x2_t b, long int vli) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_scalar_long_long_int_int(<8 x i64> noundef %b, i64 noundef %vli)
-  // NOOPT:        [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8{{$}}
-  // OPT:          [[SCALAR:%.*]] = load i64, i64* %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8{{$}}
-  // OPT-NEXT:     [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:        [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8{{$}}
+  // OPT:          [[SCALAR:%.*]] = load i64, ptr %vli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <8 x i64>, ptr {{.*}}, align 8{{$}}
+  // OPT-NEXT:     [[MATRIX:%.*]] = load <8 x i64>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_EMBED:%.*]] = insertelement <8 x i64> poison, i64 [[SCALAR]], i64 0
   // CHECK-NEXT:   [[SCALAR_EMBED1:%.*]] = shufflevector <8 x i64> [[SCALAR_EMBED]], <8 x i64> poison, <8 x i32> zeroinitializer
   // CHECK-NEXT:   [[RES:%.*]] = sub <8 x i64> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:   store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:   store <8 x i64> [[RES]], ptr {{.*}}, align 8
 
   b -= vli;
 }
 
 void add_matrix_scalar_long_long_int_unsigned_long_long(ullx4x2_t b, unsigned long long int vulli) {
   // CHECK-LABEL: define{{.*}} void @add_matrix_scalar_long_long_int_unsigned_long_long
-  // NOOPT:        [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8{{$}}
-  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* %0, align 8{{$}}
-  // OPT:          [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:     [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* %0, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:        [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8{{$}}
+  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <8 x i64>, ptr %b.addr, align 8{{$}}
+  // OPT:          [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:     [[MATRIX:%.*]] = load <8 x i64>, ptr %b.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_EMBED:%.*]] = insertelement <8 x i64> poison, i64 [[SCALAR]], i64 0
   // CHECK-NEXT:   [[SCALAR_EMBED1:%.*]] = shufflevector <8 x i64> [[SCALAR_EMBED]], <8 x i64> poison, <8 x i32> zeroinitializer
   // CHECK-NEXT:   [[RES:%.*]] = add <8 x i64> [[SCALAR_EMBED1]], [[MATRIX]]
-  // CHECK-NEXT:   store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:   store <8 x i64> [[RES]], ptr {{.*}}, align 8
   b = vulli + b;
 }
 
 void add_compound_matrix_scalar_long_long_int_unsigned_long_long(ullx4x2_t b, unsigned long long int vulli) {
   // CHECK-LABEL: define{{.*}} void @add_compound_matrix_scalar_long_long_int_unsigned_long_long
-  // NOOPT:        [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8{{$}}
-  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* %0, align 8{{$}}
-  // OPT:          [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:     [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* %0, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:        [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8{{$}}
+  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <8 x i64>, ptr %b.addr, align 8{{$}}
+  // OPT:          [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:     [[MATRIX:%.*]] = load <8 x i64>, ptr %b.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_EMBED:%.*]] = insertelement <8 x i64> poison, i64 [[SCALAR]], i64 0
   // CHECK-NEXT:   [[SCALAR_EMBED1:%.*]] = shufflevector <8 x i64> [[SCALAR_EMBED]], <8 x i64> poison, <8 x i32> zeroinitializer
   // CHECK-NEXT:   [[RES:%.*]] = add <8 x i64> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:   store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:   store <8 x i64> [[RES]], ptr {{.*}}, align 8
 
   b += vulli;
 }
 
 void subtract_compound_matrix_scalar_long_long_int_unsigned_long_long(ullx4x2_t b, unsigned long long int vulli) {
   // CHECK-LABEL: define{{.*}} void @subtract_compound_matrix_scalar_long_long_int_unsigned_long_long
-  // NOOPT:        [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8{{$}}
-  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* %0, align 8{{$}}
-  // OPT:          [[SCALAR:%.*]] = load i64, i64* %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:     [[MATRIX:%.*]] = load <8 x i64>, <8 x i64>* %0, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:        [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8{{$}}
+  // NOOPT-NEXT:   [[MATRIX:%.*]] = load <8 x i64>, ptr %b.addr, align 8{{$}}
+  // OPT:          [[SCALAR:%.*]] = load i64, ptr %vulli.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:     [[MATRIX:%.*]] = load <8 x i64>, ptr %b.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[SCALAR_EMBED:%.*]] = insertelement <8 x i64> poison, i64 [[SCALAR]], i64 0
   // CHECK-NEXT:   [[SCALAR_EMBED1:%.*]] = shufflevector <8 x i64> [[SCALAR_EMBED]], <8 x i64> poison, <8 x i32> zeroinitializer
   // CHECK-NEXT:   [[RES:%.*]] = sub <8 x i64> [[MATRIX]], [[SCALAR_EMBED1]]
-  // CHECK-NEXT:   store <8 x i64> [[RES]], <8 x i64>* {{.*}}, align 8
+  // CHECK-NEXT:   store <8 x i64> [[RES]], ptr {{.*}}, align 8
 
   b -= vulli;
 }
@@ -587,13 +587,12 @@ void subtract_compound_matrix_scalar_long_long_int_unsigned_long_long(ullx4x2_t
 
 void multiply_matrix_matrix_double(dx5x5_t b, dx5x5_t c) {
   // CHECK-LABEL: @multiply_matrix_matrix_double(
-  // NOOPT:         [[B:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // NOOPT-NEXT:    [[C:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // OPT:           [[B:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:      [[C:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[B:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // NOOPT-NEXT:    [[C:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // OPT:           [[B:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:      [[C:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[RES:%.*]] = call <25 x double> @llvm.matrix.multiply.v25f64.v25f64.v25f64(<25 x double> [[B]], <25 x double> [[C]], i32 5, i32 5, i32 5)
-  // CHECK-NEXT:    [[A_ADDR:%.*]] = bitcast [25 x double]* %a to <25 x double>*
-  // CHECK-NEXT:    store <25 x double> [[RES]], <25 x double>* [[A_ADDR]], align 8
+  // CHECK-NEXT:    store <25 x double> [[RES]], ptr %a, align 8
   // CHECK:         ret void
   //
 
@@ -603,12 +602,12 @@ void multiply_matrix_matrix_double(dx5x5_t b, dx5x5_t c) {
 
 void multiply_compound_matrix_matrix_double(dx5x5_t b, dx5x5_t c) {
   // CHECK-LABEL: @multiply_compound_matrix_matrix_double(
-  // NOOPT:        [[C:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // NOOPT-NEXT:   [[B:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // OPT:          [[C:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:     [[B:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:        [[C:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // NOOPT-NEXT:   [[B:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // OPT:          [[C:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:     [[B:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:   [[RES:%.*]] = call <25 x double> @llvm.matrix.multiply.v25f64.v25f64.v25f64(<25 x double> [[B]], <25 x double> [[C]], i32 5, i32 5, i32 5)
-  // CHECK-NEXT:   store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+  // CHECK-NEXT:   store <25 x double> [[RES]], ptr {{.*}}, align 8
   // CHECK-NEXT:   ret void
   b *= c;
 }
@@ -616,13 +615,12 @@ void multiply_compound_matrix_matrix_double(dx5x5_t b, dx5x5_t c) {
 typedef int ix3x9_t __attribute__((matrix_type(3, 9)));
 typedef int ix9x9_t __attribute__((matrix_type(9, 9)));
 // CHECK-LABEL: @multiply_matrix_matrix_int(
-// NOOPT:         [[B:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4{{$}}
-// NOOPT-NEXT:    [[C:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4{{$}}
-// OPT:           [[B:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
-// OPT-NEXT:      [[C:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[B:%.*]] = load <27 x i32>, ptr {{.*}}, align 4{{$}}
+// NOOPT-NEXT:    [[C:%.*]] = load <27 x i32>, ptr {{.*}}, align 4{{$}}
+// OPT:           [[B:%.*]] = load <27 x i32>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+// OPT-NEXT:      [[C:%.*]] = load <27 x i32>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[RES:%.*]] = call <81 x i32> @llvm.matrix.multiply.v81i32.v27i32.v27i32(<27 x i32> [[B]], <27 x i32> [[C]], i32 9, i32 3, i32 9)
-// CHECK-NEXT:    [[A_ADDR:%.*]] = bitcast [81 x i32]* %a to <81 x i32>*
-// CHECK-NEXT:    store <81 x i32> [[RES]], <81 x i32>* [[A_ADDR]], align 4
+// CHECK-NEXT:    store <81 x i32> [[RES]], ptr %a, align 4
 // CHECK:         ret void
 //
 void multiply_matrix_matrix_int(ix9x3_t b, ix3x9_t c) {
@@ -631,15 +629,15 @@ void multiply_matrix_matrix_int(ix9x3_t b, ix3x9_t c) {
 }
 
 // CHECK-LABEL: @multiply_double_matrix_scalar_float(
-// NOOPT:         [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-// NOOPT-NEXT:    [[S:%.*]] = load float, float* %s.addr, align 4{{$}}
-// OPT:           [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-// OPT-NEXT:      [[S:%.*]] = load float, float* %s.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+// NOOPT-NEXT:    [[S:%.*]] = load float, ptr %s.addr, align 4{{$}}
+// OPT:           [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+// OPT-NEXT:      [[S:%.*]] = load float, ptr %s.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[S_EXT:%.*]] = fpext float [[S]] to double
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <25 x double> poison, double [[S_EXT]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <25 x double> [[VECINSERT]], <25 x double> poison, <25 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = fmul <25 x double> [[A]], [[VECSPLAT]]
-// CHECK-NEXT:    store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+// CHECK-NEXT:    store <25 x double> [[RES]], ptr {{.*}}, align 8
 // CHECK-NEXT:    ret void
 //
 void multiply_double_matrix_scalar_float(dx5x5_t a, float s) {
@@ -647,15 +645,15 @@ void multiply_double_matrix_scalar_float(dx5x5_t a, float s) {
 }
 
 // CHECK-LABEL: @multiply_compound_double_matrix_scalar_float
-// NOOPT:         [[S:%.*]] = load float, float* %s.addr, align 4{{$}}
-// OPT:           [[S:%.*]] = load float, float* %s.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[S:%.*]] = load float, ptr %s.addr, align 4{{$}}
+// OPT:           [[S:%.*]] = load float, ptr %s.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[S_EXT:%.*]] = fpext float [[S]] to double
-// NOOPT-NEXT:    [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-// OPT-NEXT:      [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT-NEXT:    [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+// OPT-NEXT:      [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <25 x double> poison, double [[S_EXT]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <25 x double> [[VECINSERT]], <25 x double> poison, <25 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = fmul <25 x double> [[A]], [[VECSPLAT]]
-// CHECK-NEXT:    store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+// CHECK-NEXT:    store <25 x double> [[RES]], ptr {{.*}}, align 8
 // CHECK-NEXT:    ret void
 //
 void multiply_compound_double_matrix_scalar_float(dx5x5_t a, float s) {
@@ -663,14 +661,14 @@ void multiply_compound_double_matrix_scalar_float(dx5x5_t a, float s) {
 }
 
 // CHECK-LABEL: @multiply_double_matrix_scalar_double(
-// NOOPT:         [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-// NOOPT-NEXT:    [[S:%.*]] = load double, double* %s.addr, align 8{{$}}
-// OPT:           [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-// OPT-NEXT:      [[S:%.*]] = load double, double* %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+// NOOPT-NEXT:    [[S:%.*]] = load double, ptr %s.addr, align 8{{$}}
+// OPT:           [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+// OPT-NEXT:      [[S:%.*]] = load double, ptr %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <25 x double> poison, double [[S]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <25 x double> [[VECINSERT]], <25 x double> poison, <25 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = fmul <25 x double> [[A]], [[VECSPLAT]]
-// CHECK-NEXT:    store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+// CHECK-NEXT:    store <25 x double> [[RES]], ptr {{.*}}, align 8
 // CHECK-NEXT:    ret void
 //
 void multiply_double_matrix_scalar_double(dx5x5_t a, double s) {
@@ -678,29 +676,29 @@ void multiply_double_matrix_scalar_double(dx5x5_t a, double s) {
 }
 
 // CHECK-LABEL: @multiply_compound_double_matrix_scalar_double(
-// NOOPT:         [[S:%.*]] = load double, double* %s.addr, align 8{{$}}
-// NOOPT-NEXT:    [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-// OPT:           [[S:%.*]] = load double, double* %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-// OPT-NEXT:      [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[S:%.*]] = load double, ptr %s.addr, align 8{{$}}
+// NOOPT-NEXT:    [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+// OPT:           [[S:%.*]] = load double, ptr %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+// OPT-NEXT:      [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <25 x double> poison, double [[S]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <25 x double> [[VECINSERT]], <25 x double> poison, <25 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = fmul <25 x double> [[A]], [[VECSPLAT]]
-// CHECK-NEXT:    store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+// CHECK-NEXT:    store <25 x double> [[RES]], ptr {{.*}}, align 8
 // CHECK-NEXT:    ret void
 void multiply_compound_double_matrix_scalar_double(dx5x5_t a, double s) {
   a *= s;
 }
 
 // CHECK-LABEL: @multiply_float_matrix_scalar_double(
-// NOOPT:         [[S:%.*]] = load double, double* %s.addr, align 8{{$}}
-// OPT:           [[S:%.*]] = load double, double* %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[S:%.*]] = load double, ptr %s.addr, align 8{{$}}
+// OPT:           [[S:%.*]] = load double, ptr %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[S_TRUNC:%.*]] = fptrunc double [[S]] to float
-// NOOPT-NEXT:    [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR:%.*]], align 4{{$}}
-// OPT-NEXT:      [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT-NEXT:    [[MAT:%.*]] = load <6 x float>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+// OPT-NEXT:      [[MAT:%.*]] = load <6 x float>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <6 x float> poison, float [[S_TRUNC]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <6 x float> [[VECINSERT]], <6 x float> poison, <6 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = fmul <6 x float> [[VECSPLAT]], [[MAT]]
-// CHECK-NEXT:    store <6 x float> [[RES]], <6 x float>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:    store <6 x float> [[RES]], ptr [[MAT_ADDR]], align 4
 // CHECK-NEXT:    ret void
 //
 void multiply_float_matrix_scalar_double(fx2x3_t b, double s) {
@@ -708,30 +706,30 @@ void multiply_float_matrix_scalar_double(fx2x3_t b, double s) {
 }
 
 // CHECK-LABEL: @multiply_compound_float_matrix_scalar_double(
-// NOOPT:         [[S:%.*]] = load double, double* %s.addr, align 8{{$}}
-// OPT:           [[S:%.*]] = load double, double* %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[S:%.*]] = load double, ptr %s.addr, align 8{{$}}
+// OPT:           [[S:%.*]] = load double, ptr %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[S_TRUNC:%.*]] = fptrunc double [[S]] to float
-// NOOPT-NEXT:    [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR:%.*]], align 4{{$}}
-// OPT-NEXT:      [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT-NEXT:    [[MAT:%.*]] = load <6 x float>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+// OPT-NEXT:      [[MAT:%.*]] = load <6 x float>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <6 x float> poison, float [[S_TRUNC]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <6 x float> [[VECINSERT]], <6 x float> poison, <6 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = fmul <6 x float> [[MAT]], [[VECSPLAT]]
-// store <6 x float> %3, <6 x float>* %0, align 4
+// store <6 x float> %3, ptr [[MAT_ADDR]], align 4
 // ret void
 void multiply_compound_float_matrix_scalar_double(fx2x3_t b, double s) {
   b *= s;
 }
 
 // CHECK-LABEL: @multiply_int_matrix_scalar_short(
-// NOOPT:         [[S:%.*]] = load i16, i16* %s.addr, align 2{{$}}
-// OPT:           [[S:%.*]] = load i16, i16* %s.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[S:%.*]] = load i16, ptr %s.addr, align 2{{$}}
+// OPT:           [[S:%.*]] = load i16, ptr %s.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[S_EXT:%.*]] = sext i16 [[S]] to i32
-// NOOPT-NEXT:    [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4{{$}}
-// OPT-NEXT:      [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT-NEXT:    [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+// OPT-NEXT:      [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <27 x i32> poison, i32 [[S_EXT]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <27 x i32> [[VECINSERT]], <27 x i32> poison, <27 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = mul <27 x i32> [[VECSPLAT]], [[MAT]]
-// CHECK-NEXT:    store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:    store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 // CHECK-NEXT:    ret void
 //
 void multiply_int_matrix_scalar_short(ix9x3_t b, short s) {
@@ -739,15 +737,15 @@ void multiply_int_matrix_scalar_short(ix9x3_t b, short s) {
 }
 
 // CHECK-LABEL: @multiply_compound_int_matrix_scalar_short(
-// NOOPT:        [[S:%.*]] = load i16, i16* %s.addr, align 2{{$}}
-// OPT:          [[S:%.*]] = load i16, i16* %s.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:        [[S:%.*]] = load i16, ptr %s.addr, align 2{{$}}
+// OPT:          [[S:%.*]] = load i16, ptr %s.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:   [[S_EXT:%.*]] = sext i16 [[S]] to i32
-// NOOPT-NEXT:   [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4{{$}}
-// OPT-NEXT:     [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT-NEXT:   [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+// OPT-NEXT:     [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:   [[VECINSERT:%.*]] = insertelement <27 x i32> poison, i32 [[S_EXT]], i64 0
 // CHECK-NEXT:   [[VECSPLAT:%.*]] = shufflevector <27 x i32> [[VECINSERT]], <27 x i32> poison, <27 x i32> zeroinitializer
 // CHECK-NEXT:   [[RES:%.*]] = mul <27 x i32> [[MAT]], [[VECSPLAT]]
-// CHECK-NEXT:   store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:   store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 // CHECK-NEXT:   ret void
 //
 void multiply_compound_int_matrix_scalar_short(ix9x3_t b, short s) {
@@ -755,15 +753,15 @@ void multiply_compound_int_matrix_scalar_short(ix9x3_t b, short s) {
 }
 
 // CHECK-LABEL: @multiply_int_matrix_scalar_ull(
-// NOOPT:         [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4{{$}}
-// OPT:           [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
-// NOOPT-NEXT:    [[S:%.*]] = load i64, i64* %s.addr, align 8{{$}}
-// OPT-NEXT:      [[S:%.*]] = load i64, i64* %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+// OPT:           [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT-NEXT:    [[S:%.*]] = load i64, ptr %s.addr, align 8{{$}}
+// OPT-NEXT:      [[S:%.*]] = load i64, ptr %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[S_TRUNC:%.*]] = trunc i64 [[S]] to i32
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <27 x i32> poison, i32 [[S_TRUNC]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <27 x i32> [[VECINSERT]], <27 x i32> poison, <27 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = mul <27 x i32> [[MAT]], [[VECSPLAT]]
-// CHECK-NEXT:    store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:    store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 // CHECK-NEXT:    ret void
 //
 void multiply_int_matrix_scalar_ull(ix9x3_t b, unsigned long long s) {
@@ -772,15 +770,15 @@ void multiply_int_matrix_scalar_ull(ix9x3_t b, unsigned long long s) {
 
 void multiply_compound_int_matrix_scalar_ull(ix9x3_t b, unsigned long long s) {
   // CHECK-LABEL: @multiply_compound_int_matrix_scalar_ull(
-  // NOOPT:         [[S:%.*]] = load i64, i64* %s.addr, align 8{{$}}
-  // OPT:           [[S:%.*]] = load i64, i64* %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[S:%.*]] = load i64, ptr %s.addr, align 8{{$}}
+  // OPT:           [[S:%.*]] = load i64, ptr %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[S_TRUNC:%.*]] = trunc i64 [[S]] to i32
-  // NOOPT-NEXT:    [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4{{$}}
-  // OPT-NEXT:      [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:    [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+  // OPT-NEXT:      [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <27 x i32> poison, i32 [[S_TRUNC]], i64 0
   // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <27 x i32> [[VECINSERT]], <27 x i32> poison, <27 x i32> zeroinitializer
   // CHECK-NEXT:    [[RES:%.*]] = mul <27 x i32> [[MAT]], [[VECSPLAT]]
-  // CHECK-NEXT:    store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:    store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
   // CHECK-NEXT:    ret void
 
   b *= s;
@@ -789,12 +787,11 @@ void multiply_compound_int_matrix_scalar_ull(ix9x3_t b, unsigned long long s) {
 // CHECK-LABEL: @multiply_float_matrix_constant(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca [6 x float], align 4
-// CHECK-NEXT:    [[MAT_ADDR:%.*]] = bitcast [6 x float]* [[A_ADDR]] to <6 x float>*
-// CHECK-NEXT:    store <6 x float> [[A:%.*]], <6 x float>* [[MAT_ADDR]], align 4
-// NOOPT-NEXT:    [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR]], align 4{{$}}
-// OPT-NEXT:      [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// CHECK-NEXT:    store <6 x float> [[A:%.*]], ptr [[A_ADDR]], align 4
+// NOOPT-NEXT:    [[MAT:%.*]] = load <6 x float>, ptr [[A_ADDR]], align 4{{$}}
+// OPT-NEXT:      [[MAT:%.*]] = load <6 x float>, ptr [[A_ADDR]], align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[RES:%.*]] = fmul <6 x float> [[MAT]], <float 2.500000e+00, float 2.500000e+00, float 2.500000e+00, float 2.500000e+00, float 2.500000e+00, float 2.500000e+00>
-// CHECK-NEXT:    store <6 x float> [[RES]], <6 x float>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:    store <6 x float> [[RES]], ptr [[A_ADDR]], align 4
 // CHECK-NEXT:    ret void
 //
 void multiply_float_matrix_constant(fx2x3_t a) {
@@ -804,12 +801,11 @@ void multiply_float_matrix_constant(fx2x3_t a) {
 // CHECK-LABEL: @multiply_compound_float_matrix_constant(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca [6 x float], align 4
-// CHECK-NEXT:    [[MAT_ADDR:%.*]] = bitcast [6 x float]* [[A_ADDR]] to <6 x float>*
-// CHECK-NEXT:    store <6 x float> [[A:%.*]], <6 x float>* [[MAT_ADDR]], align 4
-// NOOPT-NEXT:    [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR]], align 4{{$}}
-// OPT-NEXT:      [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// CHECK-NEXT:    store <6 x float> [[A:%.*]], ptr [[A_ADDR]], align 4
+// NOOPT-NEXT:    [[MAT:%.*]] = load <6 x float>, ptr [[A_ADDR]], align 4{{$}}
+// OPT-NEXT:      [[MAT:%.*]] = load <6 x float>, ptr [[A_ADDR]], align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[RES:%.*]] = fmul <6 x float> [[MAT]], <float 2.500000e+00, float 2.500000e+00, float 2.500000e+00, float 2.500000e+00, float 2.500000e+00, float 2.500000e+00>
-// CHECK-NEXT:    store <6 x float> [[RES]], <6 x float>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:    store <6 x float> [[RES]], ptr [[A_ADDR]], align 4
 // CHECK-NEXT:    ret void
 void multiply_compound_float_matrix_constant(fx2x3_t a) {
   a *= 2.5;
@@ -818,12 +814,11 @@ void multiply_compound_float_matrix_constant(fx2x3_t a) {
 // CHECK-LABEL: @multiply_int_matrix_constant(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca [27 x i32], align 4
-// CHECK-NEXT:    [[MAT_ADDR:%.*]] = bitcast [27 x i32]* [[A_ADDR]] to <27 x i32>*
-// CHECK-NEXT:    store <27 x i32> [[A:%.*]], <27 x i32>* [[MAT_ADDR]], align 4
-// NOOPT-NEXT:    [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR]], align 4{{$}}
-// OPT-NEXT:      [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// CHECK-NEXT:    store <27 x i32> [[A:%.*]], ptr [[A_ADDR]], align 4
+// NOOPT-NEXT:    [[MAT:%.*]] = load <27 x i32>, ptr [[A_ADDR]], align 4{{$}}
+// OPT-NEXT:      [[MAT:%.*]] = load <27 x i32>, ptr [[A_ADDR]], align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[RES:%.*]] = mul <27 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>, [[MAT]]
-// CHECK-NEXT:    store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:    store <27 x i32> [[RES]], ptr [[A_ADDR]], align 4
 // CHECK-NEXT:    ret void
 //
 void multiply_int_matrix_constant(ix9x3_t a) {
@@ -833,12 +828,11 @@ void multiply_int_matrix_constant(ix9x3_t a) {
 // CHECK-LABEL: @multiply_compound_int_matrix_constant(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca [27 x i32], align 4
-// CHECK-NEXT:    [[MAT_ADDR:%.*]] = bitcast [27 x i32]* [[A_ADDR]] to <27 x i32>*
-// CHECK-NEXT:    store <27 x i32> [[A:%.*]], <27 x i32>* [[MAT_ADDR]], align 4
-// NOOPT-NEXT:    [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR]], align 4{{$}}
-// OPT-NEXT:      [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// CHECK-NEXT:    store <27 x i32> [[A:%.*]], ptr [[A_ADDR]], align 4
+// NOOPT-NEXT:    [[MAT:%.*]] = load <27 x i32>, ptr [[A_ADDR]], align 4{{$}}
+// OPT-NEXT:      [[MAT:%.*]] = load <27 x i32>, ptr [[A_ADDR]], align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[RES:%.*]] = mul <27 x i32> [[MAT]], <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
-// CHECK-NEXT:    store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:    store <27 x i32> [[RES]], ptr [[A_ADDR]], align 4
 // CHECK-NEXT:    ret void
 //
 void multiply_compound_int_matrix_constant(ix9x3_t a) {
@@ -846,15 +840,15 @@ void multiply_compound_int_matrix_constant(ix9x3_t a) {
 }
 
 // CHECK-LABEL: @divide_double_matrix_scalar_float(
-// NOOPT:         [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-// NOOPT-NEXT:    [[S:%.*]] = load float, float* %s.addr, align 4{{$}}
-// OPT:           [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-// OPT-NEXT:      [[S:%.*]] = load float, float* %s.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+// NOOPT-NEXT:    [[S:%.*]] = load float, ptr %s.addr, align 4{{$}}
+// OPT:           [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+// OPT-NEXT:      [[S:%.*]] = load float, ptr %s.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[S_EXT:%.*]] = fpext float [[S]] to double
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <25 x double> poison, double [[S_EXT]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <25 x double> [[VECINSERT]], <25 x double> poison, <25 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = fdiv <25 x double> [[A]], [[VECSPLAT]]
-// CHECK-NEXT:    store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+// CHECK-NEXT:    store <25 x double> [[RES]], ptr {{.*}}, align 8
 // CHECK-NEXT:    ret void
 //
 void divide_double_matrix_scalar_float(dx5x5_t a, float s) {
@@ -862,14 +856,14 @@ void divide_double_matrix_scalar_float(dx5x5_t a, float s) {
 }
 
 // CHECK-LABEL: @divide_double_matrix_scalar_double(
-// NOOPT:         [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-// NOOPT-NEXT:    [[S:%.*]] = load double, double* %s.addr, align 8{{$}}
-// OPT:           [[A:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
-// OPT-NEXT:      [[S:%.*]] = load double, double* %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+// NOOPT-NEXT:    [[S:%.*]] = load double, ptr %s.addr, align 8{{$}}
+// OPT:           [[A:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+// OPT-NEXT:      [[S:%.*]] = load double, ptr %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <25 x double> poison, double [[S]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <25 x double> [[VECINSERT]], <25 x double> poison, <25 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = fdiv <25 x double> [[A]], [[VECSPLAT]]
-// CHECK-NEXT:    store <25 x double> [[RES]], <25 x double>* {{.*}}, align 8
+// CHECK-NEXT:    store <25 x double> [[RES]], ptr {{.*}}, align 8
 // CHECK-NEXT:    ret void
 //
 void divide_double_matrix_scalar_double(dx5x5_t a, double s) {
@@ -877,15 +871,15 @@ void divide_double_matrix_scalar_double(dx5x5_t a, double s) {
 }
 
 // CHECK-LABEL: @divide_float_matrix_scalar_double(
-// NOOPT:         [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR:%.*]], align 4{{$}}
-// NOOPT-NEXT:    [[S:%.*]] = load double, double* %s.addr, align 8{{$}}
-// OPT:           [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
-// OPT-NEXT:      [[S:%.*]] = load double, double* %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[MAT:%.*]] = load <6 x float>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+// NOOPT-NEXT:    [[S:%.*]] = load double, ptr %s.addr, align 8{{$}}
+// OPT:           [[MAT:%.*]] = load <6 x float>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// OPT-NEXT:      [[S:%.*]] = load double, ptr %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[S_TRUNC:%.*]] = fptrunc double [[S]] to float
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <6 x float> poison, float [[S_TRUNC]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <6 x float> [[VECINSERT]], <6 x float> poison, <6 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = fdiv <6 x float> [[MAT]], [[VECSPLAT]]
-// CHECK-NEXT:    store <6 x float> [[RES]], <6 x float>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:    store <6 x float> [[RES]], ptr [[MAT_ADDR]], align 4
 // CHECK-NEXT:    ret void
 //
 void divide_float_matrix_scalar_double(fx2x3_t b, double s) {
@@ -893,15 +887,15 @@ void divide_float_matrix_scalar_double(fx2x3_t b, double s) {
 }
 
 // CHECK-LABEL: @divide_int_matrix_scalar_short(
-// NOOPT:         [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4{{$}}
-// NOOPT-NEXT:    [[S:%.*]] = load i16, i16* %s.addr, align 2{{$}}
-// OPT:           [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
-// OPT-NEXT:      [[S:%.*]] = load i16, i16* %s.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+// NOOPT-NEXT:    [[S:%.*]] = load i16, ptr %s.addr, align 2{{$}}
+// OPT:           [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// OPT-NEXT:      [[S:%.*]] = load i16, ptr %s.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[S_EXT:%.*]] = sext i16 [[S]] to i32
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <27 x i32> poison, i32 [[S_EXT]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <27 x i32> [[VECINSERT]], <27 x i32> poison, <27 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = sdiv <27 x i32> [[MAT]], [[VECSPLAT]]
-// CHECK-NEXT:    store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:    store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 // CHECK-NEXT:    ret void
 //
 void divide_int_matrix_scalar_short(ix9x3_t b, short s) {
@@ -909,15 +903,15 @@ void divide_int_matrix_scalar_short(ix9x3_t b, short s) {
 }
 
 // CHECK-LABEL: @divide_int_matrix_scalar_ull(
-// NOOPT:         [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4{{$}}
-// NOOPT-NEXT:    [[S:%.*]] = load i64, i64* %s.addr, align 8{{$}}
-// OPT:           [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
-// OPT-NEXT:      [[S:%.*]] = load i64, i64* %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
+// NOOPT-NEXT:    [[S:%.*]] = load i64, ptr %s.addr, align 8{{$}}
+// OPT:           [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// OPT-NEXT:      [[S:%.*]] = load i64, ptr %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[S_TRUNC:%.*]] = trunc i64 [[S]] to i32
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <27 x i32> poison, i32 [[S_TRUNC]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <27 x i32> [[VECINSERT]], <27 x i32> poison, <27 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = sdiv <27 x i32> [[MAT]], [[VECSPLAT]]
-// CHECK-NEXT:    store <27 x i32> [[RES]], <27 x i32>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:    store <27 x i32> [[RES]], ptr [[MAT_ADDR]], align 4
 // CHECK-NEXT:    ret void
 //
 void divide_int_matrix_scalar_ull(ix9x3_t b, unsigned long long s) {
@@ -925,14 +919,14 @@ void divide_int_matrix_scalar_ull(ix9x3_t b, unsigned long long s) {
 }
 
 // CHECK-LABEL: @divide_ull_matrix_scalar_ull(
-// NOOPT:         [[MAT:%.*]] = load <8 x i64>, <8 x i64>* [[MAT_ADDR:%.*]], align 8{{$}}
-// NOOPT-NEXT:    [[S:%.*]] = load i64, i64* %s.addr, align 8{{$}}
-// OPT:           [[MAT:%.*]] = load <8 x i64>, <8 x i64>* [[MAT_ADDR:%.*]], align 8, !tbaa !{{[0-9]+}}{{$}}
-// OPT-NEXT:      [[S:%.*]] = load i64, i64* %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+// NOOPT:         [[MAT:%.*]] = load <8 x i64>, ptr [[MAT_ADDR:%.*]], align 8{{$}}
+// NOOPT-NEXT:    [[S:%.*]] = load i64, ptr %s.addr, align 8{{$}}
+// OPT:           [[MAT:%.*]] = load <8 x i64>, ptr [[MAT_ADDR:%.*]], align 8, !tbaa !{{[0-9]+}}{{$}}
+// OPT-NEXT:      [[S:%.*]] = load i64, ptr %s.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[VECINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[S]], i64 0
 // CHECK-NEXT:    [[VECSPLAT:%.*]] = shufflevector <8 x i64> [[VECINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
 // CHECK-NEXT:    [[RES:%.*]] = udiv <8 x i64> [[MAT]], [[VECSPLAT]]
-// CHECK-NEXT:    store <8 x i64> [[RES]], <8 x i64>* [[MAT_ADDR]], align 8
+// CHECK-NEXT:    store <8 x i64> [[RES]], ptr [[MAT_ADDR]], align 8
 // CHECK-NEXT:    ret void
 //
 void divide_ull_matrix_scalar_ull(ullx4x2_t b, unsigned long long s) {
@@ -942,12 +936,11 @@ void divide_ull_matrix_scalar_ull(ullx4x2_t b, unsigned long long s) {
 // CHECK-LABEL: @divide_float_matrix_constant(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca [6 x float], align 4
-// CHECK-NEXT:    [[MAT_ADDR:%.*]] = bitcast [6 x float]* [[A_ADDR]] to <6 x float>*
-// CHECK-NEXT:    store <6 x float> [[A:%.*]], <6 x float>* [[MAT_ADDR]], align 4
-// NOOPT-NEXT:    [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR]], align 4{{$}}
-// OPT-NEXT:      [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR]], align 4, !tbaa !{{[0-9]+}}{{$}}
+// CHECK-NEXT:    store <6 x float> [[A:%.*]], ptr [[A_ADDR]], align 4
+// NOOPT-NEXT:    [[MAT:%.*]] = load <6 x float>, ptr [[A_ADDR]], align 4{{$}}
+// OPT-NEXT:      [[MAT:%.*]] = load <6 x float>, ptr [[A_ADDR]], align 4, !tbaa !{{[0-9]+}}{{$}}
 // CHECK-NEXT:    [[RES:%.*]] = fdiv <6 x float> [[MAT]], <float 2.500000e+00, float 2.500000e+00, float 2.500000e+00, float 2.500000e+00, float 2.500000e+00, float 2.500000e+00>
-// CHECK-NEXT:    store <6 x float> [[RES]], <6 x float>* [[MAT_ADDR]], align 4
+// CHECK-NEXT:    store <6 x float> [[RES]], ptr [[A_ADDR]], align 4
 // CHECK-NEXT:    ret void
 //
 void divide_float_matrix_constant(fx2x3_t a) {
@@ -963,11 +956,11 @@ typedef float fx2x3_t __attribute__((matrix_type(2, 3)));
 // matrixes and indices.
 void insert_double_matrix_const_idx_ll_u_double(dx5x5_t a, double d, fx2x3_t b, float e, int j, unsigned k) {
   // CHECK-LABEL: @insert_double_matrix_const_idx_ll_u_double(
-  // NOOPT:         [[D:%.*]] = load double, double* %d.addr, align 8{{$}}
-  // OPT:           [[D:%.*]] = load double, double* %d.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:    [[MAT:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
+  // NOOPT:         [[D:%.*]] = load double, ptr %d.addr, align 8{{$}}
+  // OPT:           [[D:%.*]] = load double, ptr %d.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:    [[MAT:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
   // CHECK-NEXT:    [[MATINS:%.*]] = insertelement <25 x double> [[MAT]], double [[D]], i64 5
-  // CHECK-NEXT:    store <25 x double> [[MATINS]], <25 x double>* {{.*}}, align 8
+  // CHECK-NEXT:    store <25 x double> [[MATINS]], ptr {{.*}}, align 8
   // CHECK-NEXT:    ret void
 
   a[0ll][1u] = d;
@@ -975,11 +968,11 @@ void insert_double_matrix_const_idx_ll_u_double(dx5x5_t a, double d, fx2x3_t b,
 
 void insert_double_matrix_const_idx_i_u_double(dx5x5_t a, double d) {
   // CHECK-LABEL: @insert_double_matrix_const_idx_i_u_double(
-  // NOOPT:         [[D:%.*]] = load double, double* %d.addr, align 8{{$}}
-  // OPT:           [[D:%.*]] = load double, double* %d.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:    [[MAT:%.*]] = load <25 x double>, <25 x double>* [[MAT_ADDR:%.*]], align 8{{$}}
+  // NOOPT:         [[D:%.*]] = load double, ptr %d.addr, align 8{{$}}
+  // OPT:           [[D:%.*]] = load double, ptr %d.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:    [[MAT:%.*]] = load <25 x double>, ptr [[MAT_ADDR:%.*]], align 8{{$}}
   // CHECK-NEXT:    [[MATINS:%.*]] = insertelement <25 x double> [[MAT]], double [[D]], i64 21
-  // CHECK-NEXT:    store <25 x double> [[MATINS]], <25 x double>* [[MAT_ADDR]], align 8
+  // CHECK-NEXT:    store <25 x double> [[MATINS]], ptr [[MAT_ADDR]], align 8
   // CHECK-NEXT:    ret void
 
   a[1][4u] = d;
@@ -987,11 +980,11 @@ void insert_double_matrix_const_idx_i_u_double(dx5x5_t a, double d) {
 
 void insert_float_matrix_const_idx_ull_i_float(fx2x3_t b, float e) {
   // CHECK-LABEL: @insert_float_matrix_const_idx_ull_i_float(
-  // NOOPT:         [[E:%.*]] = load float, float* %e.addr, align 4{{$}}
-  // OPT:           [[E:%.*]] = load float, float* %e.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:    [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR:%.*]], align 4{{$}}
+  // NOOPT:         [[E:%.*]] = load float, ptr %e.addr, align 4{{$}}
+  // OPT:           [[E:%.*]] = load float, ptr %e.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:    [[MAT:%.*]] = load <6 x float>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
   // CHECK-NEXT:    [[MATINS:%.*]] = insertelement <6 x float> [[MAT]], float [[E]], i64 3
-  // CHECK-NEXT:    store <6 x float> [[MATINS]], <6 x float>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:    store <6 x float> [[MATINS]], ptr [[MAT_ADDR]], align 4
   // CHECK-NEXT:    ret void
 
   b[1ull][1] = e;
@@ -999,21 +992,21 @@ void insert_float_matrix_const_idx_ull_i_float(fx2x3_t b, float e) {
 
 void insert_float_matrix_idx_i_u_float(fx2x3_t b, float e, int j, unsigned k) {
   // CHECK-LABEL: @insert_float_matrix_idx_i_u_float(
-  // NOOPT:         [[E:%.*]] = load float, float* %e.addr, align 4{{$}}
-  // NOOPT-NEXT:    [[J:%.*]] = load i32, i32* %j.addr, align 4{{$}}
-  // OPT:           [[E:%.*]] = load float, float* %e.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:      [[J:%.*]] = load i32, i32* %j.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[E:%.*]] = load float, ptr %e.addr, align 4{{$}}
+  // NOOPT-NEXT:    [[J:%.*]] = load i32, ptr %j.addr, align 4{{$}}
+  // OPT:           [[E:%.*]] = load float, ptr %e.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:      [[J:%.*]] = load i32, ptr %j.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[J_EXT:%.*]] = sext i32 [[J]] to i64
-  // NOOPT-NEXT:    [[K:%.*]] = load i32, i32* %k.addr, align 4{{$}}
-  // OPT-NEXT:      [[K:%.*]] = load i32, i32* %k.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:    [[K:%.*]] = load i32, ptr %k.addr, align 4{{$}}
+  // OPT-NEXT:      [[K:%.*]] = load i32, ptr %k.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[K_EXT:%.*]] = zext i32 [[K]] to i64
   // CHECK-NEXT:    [[IDX1:%.*]] = mul i64 [[K_EXT]], 2
   // CHECK-NEXT:    [[IDX2:%.*]] = add i64 [[IDX1]], [[J_EXT]]
   // OPT-NEXT:      [[CMP:%.*]] = icmp ult i64 [[IDX2]], 6
   // OPT-NEXT:      call void @llvm.assume(i1 [[CMP]])
-  // CHECK-NEXT:    [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR:%.*]], align 4{{$}}
+  // CHECK-NEXT:    [[MAT:%.*]] = load <6 x float>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
   // CHECK-NEXT:    [[MATINS:%.*]] = insertelement <6 x float> [[MAT]], float [[E]], i64 [[IDX2]]
-  // CHECK-NEXT:    store <6 x float> [[MATINS]], <6 x float>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:    store <6 x float> [[MATINS]], ptr [[MAT_ADDR]], align 4
   // CHECK-NEXT:    ret void
 
   b[j][k] = e;
@@ -1021,20 +1014,20 @@ void insert_float_matrix_idx_i_u_float(fx2x3_t b, float e, int j, unsigned k) {
 
 void insert_float_matrix_idx_s_ull_float(fx2x3_t b, float e, short j, unsigned long long k) {
   // CHECK-LABEL: @insert_float_matrix_idx_s_ull_float(
-  // NOOPT:         [[E:%.*]] = load float, float* %e.addr, align 4{{$}}
-  // NOOPT-NEXT:    [[J:%.*]] = load i16, i16* %j.addr, align 2{{$}}
-  // OPT:           [[E:%.*]] = load float, float* %e.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:      [[J:%.*]] = load i16, i16* %j.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[E:%.*]] = load float, ptr %e.addr, align 4{{$}}
+  // NOOPT-NEXT:    [[J:%.*]] = load i16, ptr %j.addr, align 2{{$}}
+  // OPT:           [[E:%.*]] = load float, ptr %e.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:      [[J:%.*]] = load i16, ptr %j.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[J_EXT:%.*]] = sext i16 [[J]] to i64
-  // NOOPT-NEXT:    [[K:%.*]] = load i64, i64* %k.addr, align 8{{$}}
-  // OPT-NEXT:      [[K:%.*]] = load i64, i64* %k.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:    [[K:%.*]] = load i64, ptr %k.addr, align 8{{$}}
+  // OPT-NEXT:      [[K:%.*]] = load i64, ptr %k.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[IDX1:%.*]] = mul i64 [[K]], 2
   // CHECK-NEXT:    [[IDX2:%.*]] = add i64 [[IDX1]], [[J_EXT]]
   // OPT-NEXT:      [[CMP:%.*]] = icmp ult i64 [[IDX2]], 6
   // OPT-NEXT:      call void @llvm.assume(i1 [[CMP]])
-  // CHECK-NEXT:    [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_ADDR:%.*]], align 4{{$}}
+  // CHECK-NEXT:    [[MAT:%.*]] = load <6 x float>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
   // CHECK-NEXT:    [[MATINS:%.*]] = insertelement <6 x float> [[MAT]], float [[E]], i64 [[IDX2]]
-  // CHECK-NEXT:    store <6 x float> [[MATINS]], <6 x float>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:    store <6 x float> [[MATINS]], ptr [[MAT_ADDR]], align 4
   // CHECK-NEXT:    ret void
 
   (b)[j][k] = e;
@@ -1044,18 +1037,18 @@ void insert_float_matrix_idx_s_ull_float(fx2x3_t b, float e, short j, unsigned l
 typedef int ix9x3_t __attribute__((matrix_type(9, 3)));
 void insert_int_idx_expr(ix9x3_t a, int i) {
   // CHECK-LABEL: @insert_int_idx_expr(
-  // NOOPT:         [[I1:%.*]] = load i32, i32* %i.addr, align 4{{$}}
-  // NOOPT-NEXT:    [[I2:%.*]] = load i32, i32* %i.addr, align 4{{$}}
-  // OPT:           [[I1:%.*]] = load i32, i32* %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:      [[I2:%.*]] = load i32, i32* %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[I1:%.*]] = load i32, ptr %i.addr, align 4{{$}}
+  // NOOPT-NEXT:    [[I2:%.*]] = load i32, ptr %i.addr, align 4{{$}}
+  // OPT:           [[I1:%.*]] = load i32, ptr %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:      [[I2:%.*]] = load i32, ptr %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[I2_ADD:%.*]] = add nsw i32 4, [[I2]]
   // CHECK-NEXT:    [[ADD_EXT:%.*]] = sext i32 [[I2_ADD]] to i64
   // CHECK-NEXT:    [[IDX2:%.*]] = add i64 18, [[ADD_EXT]]
   // OPT-NEXT:      [[CMP:%.*]] = icmp ult i64 [[IDX2]], 27
   // OPT-NEXT:      call void @llvm.assume(i1 [[CMP]])
-  // CHECK-NEXT:    [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR:%.*]], align 4{{$}}
+  // CHECK-NEXT:    [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
   // CHECK-NEXT:    [[MATINS:%.*]] = insertelement <27 x i32> [[MAT]], i32 [[I1]], i64 [[IDX2]]
-  // CHECK-NEXT:    store <27 x i32> [[MATINS]], <27 x i32>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:    store <27 x i32> [[MATINS]], ptr [[MAT_ADDR]], align 4
   // CHECK-NEXT:    ret void
 
   a[4 + i][1 + 1u] = i;
@@ -1066,14 +1059,13 @@ void insert_int_idx_expr(ix9x3_t a, int i) {
 typedef int ix9x3_t __attribute__((matrix_type(9, 3)));
 void insert_float_into_int_matrix(ix9x3_t *a, int i) {
   // CHECK-LABEL: @insert_float_into_int_matrix(
-  // NOOPT:         [[I:%.*]] = load i32, i32* %i.addr, align 4{{$}}
-  // OPT:           [[I:%.*]] = load i32, i32* %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // NOOPT-NEXT:    [[MAT_ADDR1:%.*]] = load [27 x i32]*, [27 x i32]** %a.addr, align 8{{$}}
-  // OPT-NEXT:      [[MAT_ADDR1:%.*]] = load [27 x i32]*, [27 x i32]** %a.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:    [[MAT_ADDR2:%.*]] = bitcast [27 x i32]* [[MAT_ADDR1]] to <27 x i32>*
-  // CHECK-NEXT:    [[MAT:%.*]] = load <27 x i32>, <27 x i32>* [[MAT_ADDR2]], align 4{{$}}
+  // NOOPT:         [[I:%.*]] = load i32, ptr %i.addr, align 4{{$}}
+  // OPT:           [[I:%.*]] = load i32, ptr %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:    [[MAT_ADDR1:%.*]] = load ptr, ptr %a.addr, align 8{{$}}
+  // OPT-NEXT:      [[MAT_ADDR1:%.*]] = load ptr, ptr %a.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:    [[MAT:%.*]] = load <27 x i32>, ptr [[MAT_ADDR1]], align 4{{$}}
   // CHECK-NEXT:    [[MATINS:%.*]] = insertelement <27 x i32> [[MAT]], i32 [[I]], i64 13
-  // CHECK-NEXT:    store <27 x i32> [[MATINS]], <27 x i32>* [[MAT_ADDR2]], align 4
+  // CHECK-NEXT:    store <27 x i32> [[MATINS]], ptr [[MAT_ADDR1]], align 4
   // CHECK-NEXT:    ret void
 
   (*a)[4][1] = i;
@@ -1085,11 +1077,11 @@ typedef double dx3x3_t __attribute__((matrix_type(3, 3)));
 typedef float fx3x3_t __attribute__((matrix_type(3, 3)));
 void insert_matching_dimensions1(dx3x3_t a, double i) {
   // CHECK-LABEL: @insert_matching_dimensions1(
-  // NOOPT:         [[I:%.*]] = load double, double* %i.addr, align 8{{$}}
-  // OPT:           [[I:%.*]] = load double, double* %i.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:    [[MAT:%.*]] = load <9 x double>, <9 x double>* [[MAT_ADDR:%.*]], align 8{{$}}
+  // NOOPT:         [[I:%.*]] = load double, ptr %i.addr, align 8{{$}}
+  // OPT:           [[I:%.*]] = load double, ptr %i.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:    [[MAT:%.*]] = load <9 x double>, ptr [[MAT_ADDR:%.*]], align 8{{$}}
   // CHECK-NEXT:    [[MATINS:%.*]] = insertelement <9 x double> [[MAT]], double [[I]], i64 5
-  // CHECK-NEXT:    store <9 x double> [[MATINS]], <9 x double>* [[MAT_ADDR]], align 8
+  // CHECK-NEXT:    store <9 x double> [[MATINS]], ptr [[MAT_ADDR]], align 8
   // CHECK-NEXT:    ret void
 
   a[2u][1u] = i;
@@ -1097,11 +1089,11 @@ void insert_matching_dimensions1(dx3x3_t a, double i) {
 
 void insert_matching_dimensions(fx3x3_t b, float e) {
   // CHECK-LABEL: @insert_matching_dimensions(
-  // NOOPT:         [[E:%.*]] = load float, float* %e.addr, align 4{{$}}
-  // OPT:           [[E:%.*]] = load float, float* %e.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:    [[MAT:%.*]] = load <9 x float>, <9 x float>* [[MAT_ADDR:%.*]], align 4{{$}}
+  // NOOPT:         [[E:%.*]] = load float, ptr %e.addr, align 4{{$}}
+  // OPT:           [[E:%.*]] = load float, ptr %e.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:    [[MAT:%.*]] = load <9 x float>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
   // CHECK-NEXT:    [[MATINS:%.*]] = insertelement <9 x float> [[MAT]], float [[E]], i64 7
-  // CHECK-NEXT:    store <9 x float> [[MATINS]], <9 x float>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:    store <9 x float> [[MATINS]], ptr [[MAT_ADDR]], align 4
   // CHECK-NEXT:    ret void
 
   b[1u][2u] = e;
@@ -1109,8 +1101,8 @@ void insert_matching_dimensions(fx3x3_t b, float e) {
 
 double extract_double(dx5x5_t a) {
   // CHECK-LABEL: @extract_double(
-  // NOOPT:         [[MAT:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8{{$}}
-  // OPT:           [[MAT:%.*]] = load <25 x double>, <25 x double>* {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[MAT:%.*]] = load <25 x double>, ptr {{.*}}, align 8{{$}}
+  // OPT:           [[MAT:%.*]] = load <25 x double>, ptr {{.*}}, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[MATEXT:%.*]] = extractelement <25 x double> [[MAT]], i64 12
   // CHECK-NEXT:    ret double [[MATEXT]]
 
@@ -1119,8 +1111,8 @@ double extract_double(dx5x5_t a) {
 
 double extract_float(fx3x3_t b) {
   // CHECK-LABEL: @extract_float(
-  // NOOPT:         [[MAT:%.*]] = load <9 x float>, <9 x float>* {{.*}}, align 4{{$}}
-  // OPT:           [[MAT:%.*]] = load <9 x float>, <9 x float>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[MAT:%.*]] = load <9 x float>, ptr {{.*}}, align 4{{$}}
+  // OPT:           [[MAT:%.*]] = load <9 x float>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[MATEXT:%.*]] = extractelement <9 x float> [[MAT]], i64 5
   // CHECK-NEXT:    [[TO_DOUBLE:%.*]] = fpext float [[MATEXT]] to double
   // CHECK-NEXT:    ret double [[TO_DOUBLE]]
@@ -1130,16 +1122,16 @@ double extract_float(fx3x3_t b) {
 
 int extract_int(ix9x3_t c, unsigned long j) {
   // CHECK-LABEL: @extract_int(
-  // NOOPT:         [[J1:%.*]] = load i64, i64* %j.addr, align 8{{$}}
-  // NOOPT-NEXT:    [[J2:%.*]] = load i64, i64* %j.addr, align 8{{$}}
-  // OPT:           [[J1:%.*]] = load i64, i64* %j.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // OPT-NEXT:      [[J2:%.*]] = load i64, i64* %j.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[J1:%.*]] = load i64, ptr %j.addr, align 8{{$}}
+  // NOOPT-NEXT:    [[J2:%.*]] = load i64, ptr %j.addr, align 8{{$}}
+  // OPT:           [[J1:%.*]] = load i64, ptr %j.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:      [[J2:%.*]] = load i64, ptr %j.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[IDX1:%.*]] = mul i64 [[J2]], 9
   // CHECK-NEXT:    [[IDX2:%.*]] = add i64 [[IDX1]], [[J1]]
-  // NOOPT-NEXT:    [[MAT:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4{{$}}
+  // NOOPT-NEXT:    [[MAT:%.*]] = load <27 x i32>, ptr {{.*}}, align 4{{$}}
   // OPT-NEXT:      [[CMP:%.*]] = icmp ult i64 [[IDX2]], 27
   // OPT-NEXT:      call void @llvm.assume(i1 [[CMP]])
-  // OPT-NEXT:      [[MAT:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:      [[MAT:%.*]] = load <27 x i32>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[MATEXT:%.*]] = extractelement <27 x i32> [[MAT]], i64 [[IDX2]]
   // CHECK-NEXT:    ret i32 [[MATEXT]]
 
@@ -1150,21 +1142,20 @@ typedef double dx3x2_t __attribute__((matrix_type(3, 2)));
 
 double test_extract_matrix_pointer1(dx3x2_t **ptr, unsigned j) {
   // CHECK-LABEL: @test_extract_matrix_pointer1(
-  // NOOPT:         [[J:%.*]] = load i32, i32* %j.addr, align 4{{$}}
-  // OPT:           [[J:%.*]] = load i32, i32* %j.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[J:%.*]] = load i32, ptr %j.addr, align 4{{$}}
+  // OPT:           [[J:%.*]] = load i32, ptr %j.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[J_EXT:%.*]] = zext i32 [[J]] to i64
   // CHECK-NEXT:    [[IDX:%.*]] = add i64 3, [[J_EXT]]
-  // NOOPT-NEXT:    [[PTR:%.*]] = load [6 x double]**, [6 x double]*** %ptr.addr, align 8{{$}}
+  // NOOPT-NEXT:    [[PTR:%.*]] = load ptr, ptr %ptr.addr, align 8{{$}}
   // OPT-NEXT:      [[CMP:%.*]] = icmp ult i64 [[IDX]], 6
   // OPT-NEXT:      call void @llvm.assume(i1 [[CMP]])
-  // OPT-NEXT:      [[PTR:%.*]] = load [6 x double]**, [6 x double]*** %ptr.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:    [[PTR_IDX:%.*]] = getelementptr inbounds [6 x double]*, [6 x double]** [[PTR]], i64 1
-  // NOOPT-NEXT:    [[PTR2:%.*]] = load [6 x double]*, [6 x double]** [[PTR_IDX]], align 8{{$}}
-  // OPT-NEXT:      [[PTR2:%.*]] = load [6 x double]*, [6 x double]** [[PTR_IDX]], align 8, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:    [[PTR2_IDX:%.*]] = getelementptr inbounds [6 x double], [6 x double]* [[PTR2]], i64 2
-  // CHECK-NEXT:    [[MAT_ADDR:%.*]] = bitcast [6 x double]* [[PTR2_IDX]] to <6 x double>*
-  // NOOPT-NEXT:    [[MAT:%.*]] = load <6 x double>, <6 x double>* [[MAT_ADDR]], align 8{{$}}
-  // OPT-NEXT:      [[MAT:%.*]] = load <6 x double>, <6 x double>* [[MAT_ADDR]], align 8, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:      [[PTR:%.*]] = load ptr, ptr %ptr.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:    [[PTR_IDX:%.*]] = getelementptr inbounds ptr, ptr [[PTR]], i64 1
+  // NOOPT-NEXT:    [[PTR2:%.*]] = load ptr, ptr [[PTR_IDX]], align 8{{$}}
+  // OPT-NEXT:      [[PTR2:%.*]] = load ptr, ptr [[PTR_IDX]], align 8, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:    [[PTR2_IDX:%.*]] = getelementptr inbounds [6 x double], ptr [[PTR2]], i64 2
+  // NOOPT-NEXT:    [[MAT:%.*]] = load <6 x double>, ptr [[PTR2_IDX]], align 8{{$}}
+  // OPT-NEXT:      [[MAT:%.*]] = load <6 x double>, ptr [[PTR2_IDX]], align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[MATEXT:%.*]] = extractelement <6 x double> [[MAT]], i64 [[IDX]]
   // CHECK-NEXT:    ret double [[MATEXT]]
 
@@ -1174,15 +1165,14 @@ double test_extract_matrix_pointer1(dx3x2_t **ptr, unsigned j) {
 double test_extract_matrix_pointer2(dx3x2_t **ptr) {
   // CHECK-LABEL: @test_extract_matrix_pointer2(
   // CHECK-NEXT:  entry:
-  // NOOPT:         [[PTR:%.*]] = load [6 x double]**, [6 x double]*** %ptr.addr, align 8{{$}}
-  // OPT:           [[PTR:%.*]] = load [6 x double]**, [6 x double]*** %ptr.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:    [[PTR_IDX:%.*]] = getelementptr inbounds [6 x double]*, [6 x double]** [[PTR]], i64 4
-  // NOOPT-NEXT:    [[PTR2:%.*]] = load [6 x double]*, [6 x double]** [[PTR_IDX]], align 8{{$}}
-  // OPT-NEXT:      [[PTR2:%.*]] = load [6 x double]*, [6 x double]** [[PTR_IDX]], align 8, !tbaa !{{[0-9]+}}{{$}}
-  // CHECK-NEXT:    [[PTR2_IDX:%.*]] = getelementptr inbounds [6 x double], [6 x double]* [[PTR2]], i64 6
-  // CHECK-NEXT:    [[MAT_ADDR:%.*]] = bitcast [6 x double]* [[PTR2_IDX]] to <6 x double>*
-  // NOOPT-NEXT:    [[MAT:%.*]] = load <6 x double>, <6 x double>* [[MAT_ADDR]], align 8{{$}}
-  // OPT-NEXT:      [[MAT:%.*]] = load <6 x double>, <6 x double>* [[MAT_ADDR]], align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[PTR:%.*]] = load ptr, ptr %ptr.addr, align 8{{$}}
+  // OPT:           [[PTR:%.*]] = load ptr, ptr %ptr.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:    [[PTR_IDX:%.*]] = getelementptr inbounds ptr, ptr [[PTR]], i64 4
+  // NOOPT-NEXT:    [[PTR2:%.*]] = load ptr, ptr [[PTR_IDX]], align 8{{$}}
+  // OPT-NEXT:      [[PTR2:%.*]] = load ptr, ptr [[PTR_IDX]], align 8, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-NEXT:    [[PTR2_IDX:%.*]] = getelementptr inbounds [6 x double], ptr [[PTR2]], i64 6
+  // NOOPT-NEXT:    [[MAT:%.*]] = load <6 x double>, ptr [[PTR2_IDX]], align 8{{$}}
+  // OPT-NEXT:      [[MAT:%.*]] = load <6 x double>, ptr [[PTR2_IDX]], align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[MATEXT:%.*]] = extractelement <6 x double> [[MAT]], i64 5
   // CHECK-NEXT:    ret double [[MATEXT]]
 
@@ -1191,25 +1181,25 @@ double test_extract_matrix_pointer2(dx3x2_t **ptr) {
 
 void insert_extract(dx5x5_t a, fx3x3_t b, unsigned long j, short k) {
   // CHECK-LABEL: @insert_extract(
-  // NOOPT:         [[K:%.*]] = load i16, i16* %k.addr, align 2{{$}}
-  // OPT:           [[K:%.*]] = load i16, i16* %k.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:         [[K:%.*]] = load i16, ptr %k.addr, align 2{{$}}
+  // OPT:           [[K:%.*]] = load i16, ptr %k.addr, align 2, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[K_EXT:%.*]] = sext i16 [[K]] to i64
   // CHECK-NEXT:    [[IDX1:%.*]] = mul i64 [[K_EXT]], 3
   // CHECK-NEXT:    [[IDX2:%.*]] = add i64 [[IDX1]], 0
-  // NOOPT-NEXT:    [[MAT:%.*]] = load <9 x float>, <9 x float>* [[MAT_ADDR:%.*]], align 4{{$}}
+  // NOOPT-NEXT:    [[MAT:%.*]] = load <9 x float>, ptr [[MAT_ADDR:%.*]], align 4{{$}}
   // OPT-NEXT:      [[CMP:%.*]] = icmp ult i64 [[IDX2]], 9
   // OPT-NEXT:      call void @llvm.assume(i1 [[CMP]])
-  // OPT-NEXT:      [[MAT:%.*]] = load <9 x float>, <9 x float>* [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:      [[MAT:%.*]] = load <9 x float>, ptr [[MAT_ADDR:%.*]], align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[MATEXT:%.*]] = extractelement <9 x float> [[MAT]], i64 [[IDX2]]
-  // NOOPT-NEXT:    [[J:%.*]] = load i64, i64* %j.addr, align 8{{$}}
-  // OPT-NEXT:      [[J:%.*]] = load i64, i64* %j.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:    [[J:%.*]] = load i64, ptr %j.addr, align 8{{$}}
+  // OPT-NEXT:      [[J:%.*]] = load i64, ptr %j.addr, align 8, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[IDX3:%.*]] = mul i64 [[J]], 3
   // CHECK-NEXT:    [[IDX4:%.*]] = add i64 [[IDX3]], 2
   // OPT-NEXT:      [[CMP:%.*]] = icmp ult i64 [[IDX4]], 9
   // OPT-NEXT:      call void @llvm.assume(i1 [[CMP]])
-  // CHECK-NEXT:    [[MAT2:%.*]] = load <9 x float>, <9 x float>* [[MAT_ADDR]], align 4{{$}}
+  // CHECK-NEXT:    [[MAT2:%.*]] = load <9 x float>, ptr [[MAT_ADDR]], align 4{{$}}
   // CHECK-NEXT:    [[MATINS:%.*]] = insertelement <9 x float> [[MAT2]], float [[MATEXT]], i64 [[IDX4]]
-  // CHECK-NEXT:    store <9 x float> [[MATINS]], <9 x float>* [[MAT_ADDR]], align 4
+  // CHECK-NEXT:    store <9 x float> [[MATINS]], ptr [[MAT_ADDR]], align 4
   // CHECK-NEXT:    ret void
 
   b[2][j] = b[0][k];
@@ -1217,12 +1207,12 @@ void insert_extract(dx5x5_t a, fx3x3_t b, unsigned long j, short k) {
 
 void insert_compound_stmt(dx5x5_t a) {
   // CHECK-LABEL: define{{.*}} void @insert_compound_stmt(<25 x double> noundef %a)
-  // CHECK:        [[A:%.*]] = load <25 x double>, <25 x double>* [[A_PTR:%.*]], align 8{{$}}
+  // CHECK:        [[A:%.*]] = load <25 x double>, ptr [[A_PTR:%.*]], align 8{{$}}
   // CHECK-NEXT:   [[EXT:%.*]] = extractelement <25 x double> [[A]], i64 17
   // CHECK-NEXT:   [[SUB:%.*]] = fsub double [[EXT]], 1.000000e+00
-  // CHECK-NEXT:   [[A2:%.*]] = load <25 x double>, <25 x double>* [[A_PTR]], align 8{{$}}
+  // CHECK-NEXT:   [[A2:%.*]] = load <25 x double>, ptr [[A_PTR]], align 8{{$}}
   // CHECK-NEXT:   [[INS:%.*]] = insertelement <25 x double> [[A2]], double [[SUB]], i64 17
-  // CHECK-NEXT:   store <25 x double> [[INS]], <25 x double>* [[A_PTR]], align 8
+  // CHECK-NEXT:   store <25 x double> [[INS]], ptr [[A_PTR]], align 8
   // CHECK-NEXT:   ret void
 
   a[2][3] -= 1.0;
@@ -1233,26 +1223,25 @@ struct Foo {
 };
 
 void insert_compound_stmt_field(struct Foo *a, float f, unsigned i, unsigned j) {
-  // CHECK-LABEL: define{{.*}} void @insert_compound_stmt_field(%struct.Foo* noundef %a, float noundef %f, i32 noundef %i, i32 noundef %j)
-  // NOOPT:         [[I:%.*]] = load i32, i32* %i.addr, align 4{{$}}
-  // OPT:           [[I:%.*]] = load i32, i32* %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // CHECK-LABEL: define{{.*}} void @insert_compound_stmt_field(ptr noundef %a, float noundef %f, i32 noundef %i, i32 noundef %j)
+  // NOOPT:         [[I:%.*]] = load i32, ptr %i.addr, align 4{{$}}
+  // OPT:           [[I:%.*]] = load i32, ptr %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[I_EXT:%.*]] = zext i32 [[I]] to i64
-  // NOOPT-NEXT:    [[J:%.*]] = load i32, i32* %j.addr, align 4{{$}}
-  // OPT-NEXT:      [[J:%.*]] = load i32, i32* %j.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:    [[J:%.*]] = load i32, ptr %j.addr, align 4{{$}}
+  // OPT-NEXT:      [[J:%.*]] = load i32, ptr %j.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:    [[J_EXT:%.*]] = zext i32 [[J]] to i64
   // CHECK-NEXT:    [[IDX1:%.*]] = mul i64 [[J_EXT]], 2
   // CHECK-NEXT:    [[IDX2:%.*]] = add i64 [[IDX1]], [[I_EXT]]
-  // CHECK-NEXT:    [[MAT_PTR:%.*]] = bitcast [6 x float]* %mat to <6 x float>*
   // OPT-NEXT:      [[CMP:%.*]] = icmp ult i64 [[IDX2]], 6
   // OPT-NEXT:      call void @llvm.assume(i1 [[CMP]])
-  // CHECK-NEXT:    [[MAT:%.*]] = load <6 x float>, <6 x float>* [[MAT_PTR]], align 4{{$}}
+  // CHECK-NEXT:    [[MAT:%.*]] = load <6 x float>, ptr %mat, align 4{{$}}
   // CHECK-NEXT:    [[EXT:%.*]] = extractelement <6 x float> [[MAT]], i64 [[IDX2]]
   // CHECK-NEXT:    [[SUM:%.*]] = fadd float [[EXT]], {{.*}}
   // OPT-NEXT:      [[CMP:%.*]] = icmp ult i64 [[IDX2]], 6
   // OPT-NEXT:      call void @llvm.assume(i1 [[CMP]])
-  // CHECK-NEXT:    [[MAT2:%.*]] = load <6 x float>, <6 x float>* [[MAT_PTR]], align 4{{$}}
+  // CHECK-NEXT:    [[MAT2:%.*]] = load <6 x float>, ptr %mat, align 4{{$}}
   // CHECK-NEXT:    [[INS:%.*]] = insertelement <6 x float> [[MAT2]], float [[SUM]], i64 [[IDX2]]
-  // CHECK-NEXT:    store <6 x float> [[INS]], <6 x float>* [[MAT_PTR]], align 4
+  // CHECK-NEXT:    store <6 x float> [[INS]], ptr %mat, align 4
   // CHECK-NEXT:    ret void
 
   a->mat[i][j] += f;
@@ -1260,32 +1249,32 @@ void insert_compound_stmt_field(struct Foo *a, float f, unsigned i, unsigned j)
 
 void matrix_as_idx(ix9x3_t a, int i, int j, dx5x5_t b) {
   // CHECK-LABEL: define{{.*}} void @matrix_as_idx(<27 x i32> noundef %a, i32 noundef %i, i32 noundef %j, <25 x double> noundef %b)
-  // NOOPT:       [[I1:%.*]] = load i32, i32* %i.addr, align 4{{$}}
-  // OPT:         [[I1:%.*]] = load i32, i32* %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT:       [[I1:%.*]] = load i32, ptr %i.addr, align 4{{$}}
+  // OPT:         [[I1:%.*]] = load i32, ptr %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[I1_EXT:%.*]] = sext i32 [[I1]] to i64
-  // NOOPT-NEXT:  [[J1:%.*]] = load i32, i32* %j.addr, align 4{{$}}
-  // OPT-NEXT:    [[J1:%.*]] = load i32, i32* %j.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[J1:%.*]] = load i32, ptr %j.addr, align 4{{$}}
+  // OPT-NEXT:    [[J1:%.*]] = load i32, ptr %j.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[J1_EXT:%.*]] = sext i32 [[J1]] to i64
   // CHECK-NEXT:  [[IDX1_1:%.*]] = mul i64 [[J1_EXT]], 9
   // CHECK-NEXT:  [[IDX1_2:%.*]] = add i64 [[IDX1_1]], [[I1_EXT]]
-  // NOOPT-NEXT:  [[A:%.*]] = load <27 x i32>, <27 x i32>* %0, align 4{{$}}
+  // NOOPT-NEXT:  [[A:%.*]] = load <27 x i32>, ptr %a.addr, align 4{{$}}
   // OPT-NEXT:    [[CMP:%.*]] = icmp ult i64 [[IDX1_2]], 27
   // OPT-NEXT:    call void @llvm.assume(i1 [[CMP]])
-  // OPT-NEXT:    [[A:%.*]] = load <27 x i32>, <27 x i32>* %0, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[A:%.*]] = load <27 x i32>, ptr %a.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[MI1:%.*]] = extractelement <27 x i32> [[A]], i64 [[IDX1_2]]
   // CHECK-NEXT:  [[MI1_EXT:%.*]] = sext i32 [[MI1]] to i64
-  // NOOPT-NEXT:  [[J2:%.*]] = load i32, i32* %j.addr, align 4{{$}}
-  // OPT-NEXT:    [[J2:%.*]] = load i32, i32* %j.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[J2:%.*]] = load i32, ptr %j.addr, align 4{{$}}
+  // OPT-NEXT:    [[J2:%.*]] = load i32, ptr %j.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[J2_EXT:%.*]] = sext i32 [[J2]] to i64
-  // NOOPT-NEXT:  [[I2:%.*]] = load i32, i32* %i.addr, align 4{{$}}
-  // OPT-NEXT:    [[I2:%.*]] = load i32, i32* %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // NOOPT-NEXT:  [[I2:%.*]] = load i32, ptr %i.addr, align 4{{$}}
+  // OPT-NEXT:    [[I2:%.*]] = load i32, ptr %i.addr, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[I2_EXT:%.*]] = sext i32 [[I2]] to i64
   // CHECK-NEXT:  [[IDX2_1:%.*]] = mul i64 [[I2_EXT]], 9
   // CHECK-NEXT:  [[IDX2_2:%.*]] = add i64 [[IDX2_1]], [[J2_EXT]]
-  // NOOPT-NEXT:  [[A2:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4{{$}}
+  // NOOPT-NEXT:  [[A2:%.*]] = load <27 x i32>, ptr {{.*}}, align 4{{$}}
   // OPT-NEXT:    [[CMP:%.*]] = icmp ult i64 [[IDX2_2]], 27
   // OPT-NEXT:    call void @llvm.assume(i1 [[CMP]])
-  // OPT-NEXT:    [[A2:%.*]] = load <27 x i32>, <27 x i32>* {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
+  // OPT-NEXT:    [[A2:%.*]] = load <27 x i32>, ptr {{.*}}, align 4, !tbaa !{{[0-9]+}}{{$}}
   // CHECK-NEXT:  [[MI2:%.*]] = extractelement <27 x i32> [[A2]], i64 [[IDX2_2]]
   // CHECK-NEXT:  [[MI3:%.*]] = add nsw i32 [[MI2]], 2
   // CHECK-NEXT:  [[MI3_EXT:%.*]] = sext i32 [[MI3]] to i64
@@ -1293,8 +1282,8 @@ void matrix_as_idx(ix9x3_t a, int i, int j, dx5x5_t b) {
   // CHECK-NEXT:  [[IDX3_2:%.*]] = add i64 [[IDX3_1]], [[MI1_EXT]]
   // OPT-NEXT:    [[CMP:%.*]] = icmp ult i64 [[IDX3_2]], 25
   // OPT-NEXT:    call void @llvm.assume(i1 [[CMP]])
-  // CHECK-NEXT:  [[B:%.*]] = load <25 x double>, <25 x double>* [[B_PTR:%.*]], align 8{{$}}
+  // CHECK-NEXT:  [[B:%.*]] = load <25 x double>, ptr [[B_PTR:%.*]], align 8{{$}}
   // CHECK-NEXT:  [[INS:%.*]] = insertelement <25 x double> [[B]], double 1.500000e+00, i64 [[IDX3_2]]
-  // CHECK-NEXT:  store <25 x double> [[INS]], <25 x double>* [[B_PTR]], align 8
+  // CHECK-NEXT:  store <25 x double> [[INS]], ptr [[B_PTR]], align 8
   b[a[i][j]][a[j][i] + 2] = 1.5;
 }

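The matrix hunks above all reduce to the same mechanical rewrite: with typed
pointers, Clang emitted a bitcast from the array-typed allocation (e.g.
[6 x double]*) to the vector type before every load or store; with opaque
pointers that bitcast is a no-op and is simply not emitted, so the access goes
straight through the ptr. A minimal before/after sketch (illustrative IR only;
%p, %cast and %v are placeholder names, not taken from any one test):

    ; typed pointers
    %cast = bitcast [6 x double]* %p to <6 x double>*
    %v = load <6 x double>, <6 x double>* %cast, align 8

    ; opaque pointers
    %v = load <6 x double>, ptr %p, align 8
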
diff --git a/clang/test/CodeGen/matrix-type.c b/clang/test/CodeGen/matrix-type.c
index 5025a344a0efd..e52dc20229db9 100644
--- a/clang/test/CodeGen/matrix-type.c
+++ b/clang/test/CodeGen/matrix-type.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -fenable-matrix -triple x86_64-apple-darwin %s -emit-llvm -disable-llvm-passes -o - | FileCheck %s
+// RUN: %clang_cc1 -fenable-matrix -triple x86_64-apple-darwin %s -emit-llvm -disable-llvm-passes -o - | FileCheck %s
 
 #if !__has_extension(matrix_types)
 #error Expected extension 'matrix_types' to be enabled
@@ -15,16 +15,14 @@ typedef double dx5x5_t __attribute__((matrix_type(5, 5)));
 void load_store_double(dx5x5_t *a, dx5x5_t *b) {
   // CHECK-LABEL:  define{{.*}} void @load_store_double(
   // CHECK-NEXT:  entry:
-  // CHECK-NEXT:    %a.addr = alloca [25 x double]*, align 8
-  // CHECK-NEXT:    %b.addr = alloca [25 x double]*, align 8
-  // CHECK-NEXT:    store [25 x double]* %a, [25 x double]** %a.addr, align 8
-  // CHECK-NEXT:    store [25 x double]* %b, [25 x double]** %b.addr, align 8
-  // CHECK-NEXT:    %0 = load [25 x double]*, [25 x double]** %b.addr, align 8
-  // CHECK-NEXT:    %1 = bitcast [25 x double]* %0 to <25 x double>*
-  // CHECK-NEXT:    %2 = load <25 x double>, <25 x double>* %1, align 8
-  // CHECK-NEXT:    %3 = load [25 x double]*, [25 x double]** %a.addr, align 8
-  // CHECK-NEXT:    %4 = bitcast [25 x double]* %3 to <25 x double>*
-  // CHECK-NEXT:    store <25 x double> %2, <25 x double>* %4, align 8
+  // CHECK-NEXT:    %a.addr = alloca ptr, align 8
+  // CHECK-NEXT:    %b.addr = alloca ptr, align 8
+  // CHECK-NEXT:    store ptr %a, ptr %a.addr, align 8
+  // CHECK-NEXT:    store ptr %b, ptr %b.addr, align 8
+  // CHECK-NEXT:    %0 = load ptr, ptr %b.addr, align 8
+  // CHECK-NEXT:    %1 = load <25 x double>, ptr %0, align 8
+  // CHECK-NEXT:    %2 = load ptr, ptr %a.addr, align 8
+  // CHECK-NEXT:    store <25 x double> %1, ptr %2, align 8
   // CHECK-NEXT:   ret void
 
   *a = *b;
@@ -34,16 +32,14 @@ typedef float fx3x4_t __attribute__((matrix_type(3, 4)));
 void load_store_float(fx3x4_t *a, fx3x4_t *b) {
   // CHECK-LABEL:  define{{.*}} void @load_store_float(
   // CHECK-NEXT:  entry:
-  // CHECK-NEXT:    %a.addr = alloca [12 x float]*, align 8
-  // CHECK-NEXT:    %b.addr = alloca [12 x float]*, align 8
-  // CHECK-NEXT:    store [12 x float]* %a, [12 x float]** %a.addr, align 8
-  // CHECK-NEXT:    store [12 x float]* %b, [12 x float]** %b.addr, align 8
-  // CHECK-NEXT:    %0 = load [12 x float]*, [12 x float]** %b.addr, align 8
-  // CHECK-NEXT:    %1 = bitcast [12 x float]* %0 to <12 x float>*
-  // CHECK-NEXT:    %2 = load <12 x float>, <12 x float>* %1, align 4
-  // CHECK-NEXT:    %3 = load [12 x float]*, [12 x float]** %a.addr, align 8
-  // CHECK-NEXT:    %4 = bitcast [12 x float]* %3 to <12 x float>*
-  // CHECK-NEXT:    store <12 x float> %2, <12 x float>* %4, align 4
+  // CHECK-NEXT:    %a.addr = alloca ptr, align 8
+  // CHECK-NEXT:    %b.addr = alloca ptr, align 8
+  // CHECK-NEXT:    store ptr %a, ptr %a.addr, align 8
+  // CHECK-NEXT:    store ptr %b, ptr %b.addr, align 8
+  // CHECK-NEXT:    %0 = load ptr, ptr %b.addr, align 8
+  // CHECK-NEXT:    %1 = load <12 x float>, ptr %0, align 4
+  // CHECK-NEXT:    %2 = load ptr, ptr %a.addr, align 8
+  // CHECK-NEXT:    store <12 x float> %1, ptr %2, align 4
   // CHECK-NEXT:   ret void
 
   *a = *b;
@@ -53,16 +49,14 @@ typedef int ix3x4_t __attribute__((matrix_type(4, 3)));
 void load_store_int(ix3x4_t *a, ix3x4_t *b) {
   // CHECK-LABEL:  define{{.*}} void @load_store_int(
   // CHECK-NEXT:  entry:
-  // CHECK-NEXT:    %a.addr = alloca [12 x i32]*, align 8
-  // CHECK-NEXT:    %b.addr = alloca [12 x i32]*, align 8
-  // CHECK-NEXT:    store [12 x i32]* %a, [12 x i32]** %a.addr, align 8
-  // CHECK-NEXT:    store [12 x i32]* %b, [12 x i32]** %b.addr, align 8
-  // CHECK-NEXT:    %0 = load [12 x i32]*, [12 x i32]** %b.addr, align 8
-  // CHECK-NEXT:    %1 = bitcast [12 x i32]* %0 to <12 x i32>*
-  // CHECK-NEXT:    %2 = load <12 x i32>, <12 x i32>* %1, align 4
-  // CHECK-NEXT:    %3 = load [12 x i32]*, [12 x i32]** %a.addr, align 8
-  // CHECK-NEXT:    %4 = bitcast [12 x i32]* %3 to <12 x i32>*
-  // CHECK-NEXT:    store <12 x i32> %2, <12 x i32>* %4, align 4
+  // CHECK-NEXT:    %a.addr = alloca ptr, align 8
+  // CHECK-NEXT:    %b.addr = alloca ptr, align 8
+  // CHECK-NEXT:    store ptr %a, ptr %a.addr, align 8
+  // CHECK-NEXT:    store ptr %b, ptr %b.addr, align 8
+  // CHECK-NEXT:    %0 = load ptr, ptr %b.addr, align 8
+  // CHECK-NEXT:    %1 = load <12 x i32>, ptr %0, align 4
+  // CHECK-NEXT:    %2 = load ptr, ptr %a.addr, align 8
+  // CHECK-NEXT:    store <12 x i32> %1, ptr %2, align 4
   // CHECK-NEXT:   ret void
 
   *a = *b;
@@ -72,16 +66,14 @@ typedef unsigned long long ullx3x4_t __attribute__((matrix_type(4, 3)));
 void load_store_ull(ullx3x4_t *a, ullx3x4_t *b) {
   // CHECK-LABEL:  define{{.*}} void @load_store_ull(
   // CHECK-NEXT:  entry:
-  // CHECK-NEXT:    %a.addr = alloca [12 x i64]*, align 8
-  // CHECK-NEXT:    %b.addr = alloca [12 x i64]*, align 8
-  // CHECK-NEXT:    store [12 x i64]* %a, [12 x i64]** %a.addr, align 8
-  // CHECK-NEXT:    store [12 x i64]* %b, [12 x i64]** %b.addr, align 8
-  // CHECK-NEXT:    %0 = load [12 x i64]*, [12 x i64]** %b.addr, align 8
-  // CHECK-NEXT:    %1 = bitcast [12 x i64]* %0 to <12 x i64>*
-  // CHECK-NEXT:    %2 = load <12 x i64>, <12 x i64>* %1, align 8
-  // CHECK-NEXT:    %3 = load [12 x i64]*, [12 x i64]** %a.addr, align 8
-  // CHECK-NEXT:    %4 = bitcast [12 x i64]* %3 to <12 x i64>*
-  // CHECK-NEXT:    store <12 x i64> %2, <12 x i64>* %4, align 8
+  // CHECK-NEXT:    %a.addr = alloca ptr, align 8
+  // CHECK-NEXT:    %b.addr = alloca ptr, align 8
+  // CHECK-NEXT:    store ptr %a, ptr %a.addr, align 8
+  // CHECK-NEXT:    store ptr %b, ptr %b.addr, align 8
+  // CHECK-NEXT:    %0 = load ptr, ptr %b.addr, align 8
+  // CHECK-NEXT:    %1 = load <12 x i64>, ptr %0, align 8
+  // CHECK-NEXT:    %2 = load ptr, ptr %a.addr, align 8
+  // CHECK-NEXT:    store <12 x i64> %1, ptr %2, align 8
   // CHECK-NEXT:   ret void
 
   *a = *b;
@@ -91,16 +83,14 @@ typedef __fp16 fp16x3x4_t __attribute__((matrix_type(4, 3)));
 void load_store_fp16(fp16x3x4_t *a, fp16x3x4_t *b) {
   // CHECK-LABEL:  define{{.*}} void @load_store_fp16(
   // CHECK-NEXT:  entry:
-  // CHECK-NEXT:    %a.addr = alloca [12 x half]*, align 8
-  // CHECK-NEXT:    %b.addr = alloca [12 x half]*, align 8
-  // CHECK-NEXT:    store [12 x half]* %a, [12 x half]** %a.addr, align 8
-  // CHECK-NEXT:    store [12 x half]* %b, [12 x half]** %b.addr, align 8
-  // CHECK-NEXT:    %0 = load [12 x half]*, [12 x half]** %b.addr, align 8
-  // CHECK-NEXT:    %1 = bitcast [12 x half]* %0 to <12 x half>*
-  // CHECK-NEXT:    %2 = load <12 x half>, <12 x half>* %1, align 2
-  // CHECK-NEXT:    %3 = load [12 x half]*, [12 x half]** %a.addr, align 8
-  // CHECK-NEXT:    %4 = bitcast [12 x half]* %3 to <12 x half>*
-  // CHECK-NEXT:    store <12 x half> %2, <12 x half>* %4, align 2
+  // CHECK-NEXT:    %a.addr = alloca ptr, align 8
+  // CHECK-NEXT:    %b.addr = alloca ptr, align 8
+  // CHECK-NEXT:    store ptr %a, ptr %a.addr, align 8
+  // CHECK-NEXT:    store ptr %b, ptr %b.addr, align 8
+  // CHECK-NEXT:    %0 = load ptr, ptr %b.addr, align 8
+  // CHECK-NEXT:    %1 = load <12 x half>, ptr %0, align 2
+  // CHECK-NEXT:    %2 = load ptr, ptr %a.addr, align 8
+  // CHECK-NEXT:    store <12 x half> %1, ptr %2, align 2
   // CHECK-NEXT:   ret void
 
   *a = *b;
@@ -112,14 +102,12 @@ void parameter_passing(fx3x3_t a, fx3x3_t *b) {
   // CHECK-LABEL: define{{.*}} void @parameter_passing(
   // CHECK-NEXT:  entry:
   // CHECK-NEXT:    %a.addr = alloca [9 x float], align 4
-  // CHECK-NEXT:    %b.addr = alloca [9 x float]*, align 8
-  // CHECK-NEXT:    %0 = bitcast [9 x float]* %a.addr to <9 x float>*
-  // CHECK-NEXT:    store <9 x float> %a, <9 x float>* %0, align 4
-  // CHECK-NEXT:    store [9 x float]* %b, [9 x float]** %b.addr, align 8
-  // CHECK-NEXT:    %1 = load <9 x float>, <9 x float>* %0, align 4
-  // CHECK-NEXT:    %2 = load [9 x float]*, [9 x float]** %b.addr, align 8
-  // CHECK-NEXT:    %3 = bitcast [9 x float]* %2 to <9 x float>*
-  // CHECK-NEXT:    store <9 x float> %1, <9 x float>* %3, align 4
+  // CHECK-NEXT:    %b.addr = alloca ptr, align 8
+  // CHECK-NEXT:    store <9 x float> %a, ptr %a.addr, align 4
+  // CHECK-NEXT:    store ptr %b, ptr %b.addr, align 8
+  // CHECK-NEXT:    %0 = load <9 x float>, ptr %a.addr, align 4
+  // CHECK-NEXT:    %1 = load ptr, ptr %b.addr, align 8
+  // CHECK-NEXT:    store <9 x float> %0, ptr %1, align 4
   // CHECK-NEXT:    ret void
   *b = a;
 }
@@ -127,12 +115,11 @@ void parameter_passing(fx3x3_t a, fx3x3_t *b) {
 fx3x3_t return_matrix(fx3x3_t *a) {
   // CHECK-LABEL: define{{.*}} <9 x float> @return_matrix
   // CHECK-NEXT:  entry:
-  // CHECK-NEXT:    %a.addr = alloca [9 x float]*, align 8
-  // CHECK-NEXT:    store [9 x float]* %a, [9 x float]** %a.addr, align 8
-  // CHECK-NEXT:    %0 = load [9 x float]*, [9 x float]** %a.addr, align 8
-  // CHECK-NEXT:    %1 = bitcast [9 x float]* %0 to <9 x float>*
-  // CHECK-NEXT:    %2 = load <9 x float>, <9 x float>* %1, align 4
-  // CHECK-NEXT:    ret <9 x float> %2
+  // CHECK-NEXT:    %a.addr = alloca ptr, align 8
+  // CHECK-NEXT:    store ptr %a, ptr %a.addr, align 8
+  // CHECK-NEXT:    %0 = load ptr, ptr %a.addr, align 8
+  // CHECK-NEXT:    %1 = load <9 x float>, ptr %0, align 4
+  // CHECK-NEXT:    ret <9 x float> %1
   return *a;
 }
 
@@ -145,18 +132,16 @@ typedef struct {
 void matrix_struct(Matrix *a, Matrix *b) {
   // CHECK-LABEL: define{{.*}} void @matrix_struct(
   // CHECK-NEXT:  entry:
-  // CHECK-NEXT:    %a.addr = alloca %struct.Matrix*, align 8
-  // CHECK-NEXT:    %b.addr = alloca %struct.Matrix*, align 8
-  // CHECK-NEXT:    store %struct.Matrix* %a, %struct.Matrix** %a.addr, align 8
-  // CHECK-NEXT:    store %struct.Matrix* %b, %struct.Matrix** %b.addr, align 8
-  // CHECK-NEXT:    %0 = load %struct.Matrix*, %struct.Matrix** %a.addr, align 8
-  // CHECK-NEXT:    %Data = getelementptr inbounds %struct.Matrix, %struct.Matrix* %0, i32 0, i32 1
-  // CHECK-NEXT:    %1 = bitcast [12 x float]* %Data to <12 x float>*
-  // CHECK-NEXT:    %2 = load <12 x float>, <12 x float>* %1, align 4
-  // CHECK-NEXT:    %3 = load %struct.Matrix*, %struct.Matrix** %b.addr, align 8
-  // CHECK-NEXT:    %Data1 = getelementptr inbounds %struct.Matrix, %struct.Matrix* %3, i32 0, i32 1
-  // CHECK-NEXT:    %4 = bitcast [12 x float]* %Data1 to <12 x float>*
-  // CHECK-NEXT:    store <12 x float> %2, <12 x float>* %4, align 4
+  // CHECK-NEXT:    %a.addr = alloca ptr, align 8
+  // CHECK-NEXT:    %b.addr = alloca ptr, align 8
+  // CHECK-NEXT:    store ptr %a, ptr %a.addr, align 8
+  // CHECK-NEXT:    store ptr %b, ptr %b.addr, align 8
+  // CHECK-NEXT:    %0 = load ptr, ptr %a.addr, align 8
+  // CHECK-NEXT:    %Data = getelementptr inbounds %struct.Matrix, ptr %0, i32 0, i32 1
+  // CHECK-NEXT:    %1 = load <12 x float>, ptr %Data, align 4
+  // CHECK-NEXT:    %2 = load ptr, ptr %b.addr, align 8
+  // CHECK-NEXT:    %Data1 = getelementptr inbounds %struct.Matrix, ptr %2, i32 0, i32 1
+  // CHECK-NEXT:    store <12 x float> %1, ptr %Data1, align 4
   // CHECK-NEXT:    ret void
   b->Data = a->Data;
 }
@@ -166,10 +151,8 @@ void matrix_inline_asm_memory_readwrite(void) {
   // CHECK-LABEL: define{{.*}} void @matrix_inline_asm_memory_readwrite()
   // CHECK-NEXT:  entry:
   // CHECK-NEXT:    [[ALLOCA:%.+]] = alloca [16 x double], align 8
-  // CHECK-NEXT:    [[PTR1:%.+]] = bitcast [16 x double]* [[ALLOCA]] to <16 x double>*
-  // CHECK-NEXT:    [[PTR2:%.+]] = bitcast [16 x double]* [[ALLOCA]] to <16 x double>*
-  // CHECK-NEXT:    [[VAL:%.+]] = load <16 x double>, <16 x double>* [[PTR2]], align 8
-  // CHECK-NEXT:    call void asm sideeffect "", "=*r|m,0,~{memory},~{dirflag},~{fpsr},~{flags}"(<16 x double>* elementtype(<16 x double>) [[PTR1]], <16 x double> [[VAL]])
+  // CHECK-NEXT:    [[VAL:%.+]] = load <16 x double>, ptr [[ALLOCA]], align 8
+  // CHECK-NEXT:    call void asm sideeffect "", "=*r|m,0,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(<16 x double>) [[ALLOCA]], <16 x double> [[VAL]])
   // CHECK-NEXT:    ret void
 
   dx4x4_t m;

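One detail worth calling out in matrix_struct above: getelementptr still spells
out its source element type under opaque pointers, because that type is carried
by the instruction itself rather than by the pointer operand. Only the operand
type changes (both lines are from the hunk above):

    ; typed pointers
    %Data = getelementptr inbounds %struct.Matrix, %struct.Matrix* %0, i32 0, i32 1
    ; opaque pointers
    %Data = getelementptr inbounds %struct.Matrix, ptr %0, i32 0, i32 1
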
diff --git a/clang/test/CodeGen/mingw-long-double.c b/clang/test/CodeGen/mingw-long-double.c
index 306024f6ecf26..a50f8f0b3b633 100644
--- a/clang/test/CodeGen/mingw-long-double.c
+++ b/clang/test/CodeGen/mingw-long-double.c
@@ -1,10 +1,10 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple i686-windows-gnu -emit-llvm -o - %s \
+// RUN: %clang_cc1 -triple i686-windows-gnu -emit-llvm -o - %s \
 // RUN:    | FileCheck %s --check-prefix=GNU32
-// RUN: %clang_cc1 -no-opaque-pointers -triple i686-windows-gnu -emit-llvm -o - %s -mms-bitfields \
+// RUN: %clang_cc1 -triple i686-windows-gnu -emit-llvm -o - %s -mms-bitfields \
 // RUN:    | FileCheck %s --check-prefix=GNU32
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-windows-gnu -emit-llvm -o - %s \
+// RUN: %clang_cc1 -triple x86_64-windows-gnu -emit-llvm -o - %s \
 // RUN:    | FileCheck %s --check-prefix=GNU64
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-windows-msvc -emit-llvm -o - %s \
+// RUN: %clang_cc1 -triple x86_64-windows-msvc -emit-llvm -o - %s \
 // RUN:    | FileCheck %s --check-prefix=MSC64
 
 struct {
@@ -32,15 +32,15 @@ long double TestLD(long double x) {
   return x * x;
 }
 // GNU32: define dso_local x86_fp80 @TestLD(x86_fp80 noundef %x)
-// GNU64: define dso_local void @TestLD(x86_fp80* noalias sret(x86_fp80) align 16 %agg.result, x86_fp80* noundef %0)
+// GNU64: define dso_local void @TestLD(ptr noalias sret(x86_fp80) align 16 %agg.result, ptr noundef %0)
 // MSC64: define dso_local double @TestLD(double noundef %x)
 
 long double _Complex TestLDC(long double _Complex x) {
   return x * x;
 }
-// GNU32: define dso_local void @TestLDC({ x86_fp80, x86_fp80 }* noalias sret({ x86_fp80, x86_fp80 }) align 4 %agg.result, { x86_fp80, x86_fp80 }* noundef byval({ x86_fp80, x86_fp80 }) align 4 %x)
-// GNU64: define dso_local void @TestLDC({ x86_fp80, x86_fp80 }* noalias sret({ x86_fp80, x86_fp80 }) align 16 %agg.result, { x86_fp80, x86_fp80 }* noundef %x)
-// MSC64: define dso_local void @TestLDC({ double, double }* noalias sret({ double, double }) align 8 %agg.result, { double, double }* noundef %x)
+// GNU32: define dso_local void @TestLDC(ptr noalias sret({ x86_fp80, x86_fp80 }) align 4 %agg.result, ptr noundef byval({ x86_fp80, x86_fp80 }) align 4 %x)
+// GNU64: define dso_local void @TestLDC(ptr noalias sret({ x86_fp80, x86_fp80 }) align 16 %agg.result, ptr noundef %x)
+// MSC64: define dso_local void @TestLDC(ptr noalias sret({ double, double }) align 8 %agg.result, ptr noundef %x)
 
 // GNU32: declare dso_local void @__mulxc3
 // GNU64: declare dso_local void @__mulxc3
@@ -53,11 +53,9 @@ void VarArgLD(int a, ...) {
   __builtin_va_list ap;
   __builtin_va_start(ap, a);
   long double LD = __builtin_va_arg(ap, long double);
-  // GNU32-NOT: load x86_fp80*, x86_fp80**
-  // GNU32: load x86_fp80, x86_fp80*
-  // GNU64: load x86_fp80*, x86_fp80**
-  // GNU64: load x86_fp80, x86_fp80*
-  // MSC64-NOT: load double*, double**
-  // MSC64: load double, double*
+  // GNU32: load x86_fp80, ptr %argp.cur
+  // GNU64: [[P:%.*]] = load ptr, ptr %argp.cur
+  // GNU64: load x86_fp80, ptr [[P]]
+  // MSC64: load double, ptr %argp.cur
   __builtin_va_end(ap);
 }

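The updated VarArgLD checks now spell out the ABI differences directly: GNU32
reads the x86_fp80 in place from the current va slot, MSC64 treats long double
as plain double, and GNU64 passes long double indirectly, so va_arg becomes a
two-step load. A sketch of the GNU64 case (assuming the %argp.cur naming used
by the va_arg lowering above; %p and %LD are placeholders):

    %p = load ptr, ptr %argp.cur
    %LD = load x86_fp80, ptr %p
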
diff --git a/clang/test/CodeGen/mips-inline-asm-modifiers.c b/clang/test/CodeGen/mips-inline-asm-modifiers.c
index 11c32fcbd7678..c85a585b080bf 100644
--- a/clang/test/CodeGen/mips-inline-asm-modifiers.c
+++ b/clang/test/CodeGen/mips-inline-asm-modifiers.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple mipsel-unknown-linux -o - -emit-llvm %s \
+// RUN: %clang_cc1 -triple mipsel-unknown-linux -o - -emit-llvm %s \
 // RUN: | FileCheck %s
 
 // This checks that the frontend will accept inline asm operand modifiers
@@ -7,8 +7,8 @@ int printf(const char*, ...);
 
 typedef int v4i32 __attribute__((vector_size(16)));
 
-  // CHECK: %{{[0-9]+}} = call i32 asm ".set noreorder;\0Alw    $0,$1;\0A.set reorder;\0A", "=r,*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8 x i32], [8 x i32]* @b, i32 {{[0-9]+}}, i32 {{[0-9]+}})) #2,
-  // CHECK: %{{[0-9]+}} = call i32 asm "lw    $0,${1:D};\0A", "=r,*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8 x i32], [8 x i32]* @b, i32 {{[0-9]+}}, i32 {{[0-9]+}})) #2,
+  // CHECK: %{{[0-9]+}} = call i32 asm ".set noreorder;\0Alw    $0,$1;\0A.set reorder;\0A", "=r,*m,~{$1}"(ptr elementtype(i32) getelementptr inbounds (i32, ptr @b, i32 {{[0-9]+}})) #2,
+  // CHECK: %{{[0-9]+}} = call i32 asm "lw    $0,${1:D};\0A", "=r,*m,~{$1}"(ptr elementtype(i32) getelementptr inbounds (i32, ptr @b, i32 {{[0-9]+}})) #2,
   // CHECK: %{{[0-9]+}} = call <4 x i32> asm "ldi.w ${0:w},1", "=f,~{$1}"
 int b[8] = {0,1,2,3,4,5,6,7};
 int  main(void)

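Note that the constant expression feeding the asm operand changes shape as
well: with typed pointers it was a two-index GEP into the [8 x i32] array,
while under opaque pointers the constant folder canonicalizes it to a
single-index GEP on the element type. For instance (a hypothetical access to
element 4 of @b):

    ; typed pointers
    getelementptr inbounds ([8 x i32], [8 x i32]* @b, i32 0, i32 4)
    ; opaque pointers
    getelementptr inbounds (i32, ptr @b, i32 4)
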
diff --git a/clang/test/CodeGen/mips-varargs.c b/clang/test/CodeGen/mips-varargs.c
index 54c96c061f6b6..e8d11587fe681 100644
--- a/clang/test/CodeGen/mips-varargs.c
+++ b/clang/test/CodeGen/mips-varargs.c
@@ -1,9 +1,9 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple mips-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,O32 -enable-var-scope
-// RUN: %clang_cc1 -no-opaque-pointers -triple mipsel-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,O32 -enable-var-scope
-// RUN: %clang_cc1 -no-opaque-pointers -triple mips64-unknown-linux -o - -emit-llvm  -target-abi n32 %s | FileCheck %s -check-prefixes=ALL,N32,NEW -enable-var-scope
-// RUN: %clang_cc1 -no-opaque-pointers -triple mips64-unknown-linux -o - -emit-llvm  -target-abi n32 %s | FileCheck %s -check-prefixes=ALL,N32,NEW -enable-var-scope
-// RUN: %clang_cc1 -no-opaque-pointers -triple mips64-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,N64,NEW -enable-var-scope
-// RUN: %clang_cc1 -no-opaque-pointers -triple mips64el-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,N64,NEW -enable-var-scope
+// RUN: %clang_cc1 -triple mips-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,O32 -enable-var-scope
+// RUN: %clang_cc1 -triple mipsel-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,O32 -enable-var-scope
+// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm  -target-abi n32 %s | FileCheck %s -check-prefixes=ALL,N32,NEW -enable-var-scope
+// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm  -target-abi n32 %s | FileCheck %s -check-prefixes=ALL,N32,NEW -enable-var-scope
+// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,N64,NEW -enable-var-scope
+// RUN: %clang_cc1 -triple mips64el-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefixes=ALL,N64,NEW -enable-var-scope
 
 #include <stdarg.h>
 
@@ -19,38 +19,33 @@ int test_i32(char *fmt, ...) {
   return v;
 }
 
-// O32-LABEL: define{{.*}} i32 @test_i32(i8*{{.*}} %fmt, ...)
-// N32-LABEL: define{{.*}} signext i32 @test_i32(i8*{{.*}} %fmt, ...)
-// N64-LABEL: define{{.*}} signext i32 @test_i32(i8*{{.*}} %fmt, ...)
+// O32-LABEL: define{{.*}} i32 @test_i32(ptr{{.*}} %fmt, ...)
+// N32-LABEL: define{{.*}} signext i32 @test_i32(ptr{{.*}} %fmt, ...)
+// N64-LABEL: define{{.*}} signext i32 @test_i32(ptr{{.*}} %fmt, ...)
 //
-// O32:   %va = alloca i8*, align [[$PTRALIGN:4]]
-// N32:   %va = alloca i8*, align [[$PTRALIGN:4]]
-// N64:   %va = alloca i8*, align [[$PTRALIGN:8]]
+// O32:   %va = alloca ptr, align [[$PTRALIGN:4]]
+// N32:   %va = alloca ptr, align [[$PTRALIGN:4]]
+// N64:   %va = alloca ptr, align [[$PTRALIGN:8]]
 // ALL:   [[V:%.*]] = alloca i32, align 4
 // NEW:   [[PROMOTION_TEMP:%.*]] = alloca i32, align 4
 //
-// ALL:   [[VA:%.+]] = bitcast i8** %va to i8*
-// ALL:   call void @llvm.va_start(i8* [[VA]])
-// ALL:   [[AP_CUR:%.+]] = load i8*, i8** %va, align [[$PTRALIGN]]
-// O32:   [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[$INTPTR_T:i32]] [[$CHUNKSIZE:4]]
-// NEW:   [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[$INTPTR_T:i32|i64]] [[$CHUNKSIZE:8]]
+// ALL:   call void @llvm.va_start(ptr %va)
+// ALL:   [[AP_CUR:%.+]] = load ptr, ptr %va, align [[$PTRALIGN]]
+// O32:   [[AP_NEXT:%.+]] = getelementptr inbounds i8, ptr [[AP_CUR]], [[$INTPTR_T:i32]] [[$CHUNKSIZE:4]]
+// NEW:   [[AP_NEXT:%.+]] = getelementptr inbounds i8, ptr [[AP_CUR]], [[$INTPTR_T:i32|i64]] [[$CHUNKSIZE:8]]
 //
-// ALL:   store i8* [[AP_NEXT]], i8** %va, align [[$PTRALIGN]]
+// ALL:   store ptr [[AP_NEXT]], ptr %va, align [[$PTRALIGN]]
 //
-// O32:   [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i32]]*
-// O32:   [[ARG:%.+]] = load i32, i32* [[AP_CAST]], align [[CHUNKALIGN:4]]
+// O32:   [[ARG:%.+]] = load i32, ptr [[AP_CUR]], align [[CHUNKALIGN:4]]
 //
-// N32:   [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i64]]*
-// N32:   [[TMP:%.+]] = load i64, i64* [[AP_CAST]], align [[CHUNKALIGN:8]]
-// N64:   [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i64]]*
-// N64:   [[TMP:%.+]] = load i64, i64* [[AP_CAST]], align [[CHUNKALIGN:8]]
+// N32:   [[TMP:%.+]] = load i64, ptr [[AP_CUR]], align [[CHUNKALIGN:8]]
+// N64:   [[TMP:%.+]] = load i64, ptr [[AP_CUR]], align [[CHUNKALIGN:8]]
 // NEW:   [[TMP2:%.+]] = trunc i64 [[TMP]] to i32
-// NEW:   store i32 [[TMP2]], i32* [[PROMOTION_TEMP]], align 4
-// NEW:   [[ARG:%.+]] = load i32, i32* [[PROMOTION_TEMP]], align 4
-// ALL:   store i32 [[ARG]], i32* [[V]], align 4
+// NEW:   store i32 [[TMP2]], ptr [[PROMOTION_TEMP]], align 4
+// NEW:   [[ARG:%.+]] = load i32, ptr [[PROMOTION_TEMP]], align 4
+// ALL:   store i32 [[ARG]], ptr [[V]], align 4
 //
-// ALL:   [[VA1:%.+]] = bitcast i8** %va to i8*
-// ALL:   call void @llvm.va_end(i8* [[VA1]])
+// ALL:   call void @llvm.va_end(ptr %va)
 // ALL: }
 
 long long test_i64(char *fmt, ...) {
@@ -63,28 +58,25 @@ long long test_i64(char *fmt, ...) {
   return v;
 }
 
-// ALL-LABEL: define{{.*}} i64 @test_i64(i8*{{.*}} %fmt, ...)
+// ALL-LABEL: define{{.*}} i64 @test_i64(ptr{{.*}} %fmt, ...)
 //
-// ALL:   %va = alloca i8*, align [[$PTRALIGN]]
-// ALL:   [[VA:%.+]] = bitcast i8** %va to i8*
-// ALL:   call void @llvm.va_start(i8* [[VA]])
-// ALL:   [[AP_CUR:%.+]] = load i8*, i8** %va, align [[$PTRALIGN]]
+// ALL:   %va = alloca ptr, align [[$PTRALIGN]]
+// ALL:   call void @llvm.va_start(ptr %va)
+// ALL:   [[AP_CUR:%.+]] = load ptr, ptr %va, align [[$PTRALIGN]]
 //
 // i64 is 8-byte aligned; while this is within O32's stack alignment, there's
 // no guarantee that the offset is still 8-byte aligned after earlier reads.
-// O32:   [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to i32
+// O32:   [[TMP1:%.+]] = ptrtoint ptr [[AP_CUR]] to i32
 // O32:   [[TMP2:%.+]] = add i32 [[TMP1]], 7
 // O32:   [[TMP3:%.+]] = and i32 [[TMP2]], -8
-// O32:   [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to i8*
+// O32:   [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to ptr
 //
-// ALL:   [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[$INTPTR_T]] 8
-// ALL:   store i8* [[AP_NEXT]], i8** %va, align [[$PTRALIGN]]
+// ALL:   [[AP_NEXT:%.+]] = getelementptr inbounds i8, ptr [[AP_CUR]], [[$INTPTR_T]] 8
+// ALL:   store ptr [[AP_NEXT]], ptr %va, align [[$PTRALIGN]]
 //
-// ALL:   [[AP_CAST:%.*]] = bitcast i8* [[AP_CUR]] to i64*
-// ALL:   [[ARG:%.+]] = load i64, i64* [[AP_CAST]], align 8
+// ALL:   [[ARG:%.+]] = load i64, ptr [[AP_CUR]], align 8
 //
-// ALL:   [[VA1:%.+]] = bitcast i8** %va to i8*
-// ALL:   call void @llvm.va_end(i8* [[VA1]])
+// ALL:   call void @llvm.va_end(ptr %va)
 // ALL: }
 
 char *test_ptr(char *fmt, ...) {
@@ -97,32 +89,29 @@ char *test_ptr(char *fmt, ...) {
   return v;
 }
 
-// ALL-LABEL: define{{.*}} i8* @test_ptr(i8*{{.*}} %fmt, ...)
+// ALL-LABEL: define{{.*}} ptr @test_ptr(ptr{{.*}} %fmt, ...)
 //
-// ALL:   %va = alloca i8*, align [[$PTRALIGN]]
-// ALL:   [[V:%.*]] = alloca i8*, align [[$PTRALIGN]]
-// N32:   [[AP_CAST:%.+]] = alloca i8*, align 4
-// ALL:   [[VA:%.+]] = bitcast i8** %va to i8*
-// ALL:   call void @llvm.va_start(i8* [[VA]])
-// ALL:   [[AP_CUR:%.+]] = load i8*, i8** %va, align [[$PTRALIGN]]
-// ALL:   [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[$INTPTR_T]] [[$CHUNKSIZE]]
-// ALL:   store i8* [[AP_NEXT]], i8** %va, align [[$PTRALIGN]]
+// ALL:   %va = alloca ptr, align [[$PTRALIGN]]
+// ALL:   [[V:%.*]] = alloca ptr, align [[$PTRALIGN]]
+// N32:   [[AP_CAST:%.+]] = alloca ptr, align 4
+// ALL:   call void @llvm.va_start(ptr %va)
+// ALL:   [[AP_CUR:%.+]] = load ptr, ptr %va, align [[$PTRALIGN]]
+// ALL:   [[AP_NEXT:%.+]] = getelementptr inbounds i8, ptr [[AP_CUR]], [[$INTPTR_T]] [[$CHUNKSIZE]]
+// ALL:   store ptr [[AP_NEXT]], ptr %va, align [[$PTRALIGN]]
 //
 // When the chunk size matches the pointer size, this is easy.
-// O32:   [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to i8**
-// N64:   [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to i8**
 // Otherwise we need a promotion temporary.
-// N32:   [[TMP1:%.+]] = bitcast i8* [[AP_CUR]] to i64*
-// N32:   [[TMP2:%.+]] = load i64, i64* [[TMP1]], align 8
+// N32:   [[TMP2:%.+]] = load i64, ptr [[AP_CUR]], align 8
 // N32:   [[TMP3:%.+]] = trunc i64 [[TMP2]] to i32
-// N32:   [[PTR:%.+]] = inttoptr i32 [[TMP3]] to i8*
-// N32:   store i8* [[PTR]], i8** [[AP_CAST]], align 4
+// N32:   [[PTR:%.+]] = inttoptr i32 [[TMP3]] to ptr
+// N32:   store ptr [[PTR]], ptr [[AP_CAST]], align 4
+// N32:   [[ARG:%.+]] = load ptr, ptr [[AP_CAST]], align [[$PTRALIGN]]
 //
-// ALL:   [[ARG:%.+]] = load i8*, i8** [[AP_CAST]], align [[$PTRALIGN]]
-// ALL:   store i8* [[ARG]], i8** [[V]], align [[$PTRALIGN]]
+// O32:   [[ARG:%.+]] = load ptr, ptr [[AP_CUR]], align [[$PTRALIGN]]
+// N64:   [[ARG:%.+]] = load ptr, ptr [[AP_CUR]], align [[$PTRALIGN]]
+// ALL:   store ptr [[ARG]], ptr [[V]], align [[$PTRALIGN]]
 //
-// ALL:   [[VA1:%.+]] = bitcast i8** %va to i8*
-// ALL:   call void @llvm.va_end(i8* [[VA1]])
+// ALL:   call void @llvm.va_end(ptr %va)
 // ALL: }
 
 int test_v4i32(char *fmt, ...) {
@@ -135,39 +124,36 @@ int test_v4i32(char *fmt, ...) {
   return v[0];
 }
 
-// O32-LABEL: define{{.*}} i32 @test_v4i32(i8*{{.*}} %fmt, ...)
-// N32-LABEL: define{{.*}} signext i32 @test_v4i32(i8*{{.*}} %fmt, ...)
-// N64-LABEL: define{{.*}} signext i32 @test_v4i32(i8*{{.*}} %fmt, ...)
+// O32-LABEL: define{{.*}} i32 @test_v4i32(ptr{{.*}} %fmt, ...)
+// N32-LABEL: define{{.*}} signext i32 @test_v4i32(ptr{{.*}} %fmt, ...)
+// N64-LABEL: define{{.*}} signext i32 @test_v4i32(ptr{{.*}} %fmt, ...)
 //
-// ALL:   %va = alloca i8*, align [[$PTRALIGN]]
+// ALL:   %va = alloca ptr, align [[$PTRALIGN]]
 // ALL:   [[V:%.+]] = alloca <4 x i32>, align 16
-// ALL:   [[VA1:%.+]] = bitcast i8** %va to i8*
-// ALL:   call void @llvm.va_start(i8* [[VA1]])
-// ALL:   [[AP_CUR:%.+]] = load i8*, i8** %va, align [[$PTRALIGN]]
+// ALL:   call void @llvm.va_start(ptr %va)
+// ALL:   [[AP_CUR:%.+]] = load ptr, ptr %va, align [[$PTRALIGN]]
 //
 // Vectors are 16-byte aligned; however, the O32 ABI has a maximum alignment
 // of 8 bytes since the base of the stack is 8-byte aligned.
-// O32:   [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to i32
+// O32:   [[TMP1:%.+]] = ptrtoint ptr [[AP_CUR]] to i32
 // O32:   [[TMP2:%.+]] = add i32 [[TMP1]], 7
 // O32:   [[TMP3:%.+]] = and i32 [[TMP2]], -8
-// O32:   [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to i8*
+// O32:   [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to ptr
 //
-// NEW:   [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to [[$INTPTR_T]]
+// NEW:   [[TMP1:%.+]] = ptrtoint ptr [[AP_CUR]] to [[$INTPTR_T]]
 // NEW:   [[TMP2:%.+]] = add [[$INTPTR_T]] [[TMP1]], 15
 // NEW:   [[TMP3:%.+]] = and [[$INTPTR_T]] [[TMP2]], -16
-// NEW:   [[AP_CUR:%.+]] = inttoptr [[$INTPTR_T]] [[TMP3]] to i8*
+// NEW:   [[AP_CUR:%.+]] = inttoptr [[$INTPTR_T]] [[TMP3]] to ptr
 //
-// ALL:   [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[$INTPTR_T]] 16
-// ALL:   store i8* [[AP_NEXT]], i8** %va, align [[$PTRALIGN]]
+// ALL:   [[AP_NEXT:%.+]] = getelementptr inbounds i8, ptr [[AP_CUR]], [[$INTPTR_T]] 16
+// ALL:   store ptr [[AP_NEXT]], ptr %va, align [[$PTRALIGN]]
 //
-// ALL:   [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to <4 x i32>*
-// O32:   [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 8
-// N64:   [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 16
-// N32:   [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 16
-// ALL:   store <4 x i32> [[ARG]], <4 x i32>* [[V]], align 16
+// O32:   [[ARG:%.+]] = load <4 x i32>, ptr [[AP_CUR]], align 8
+// N64:   [[ARG:%.+]] = load <4 x i32>, ptr [[AP_CUR]], align 16
+// N32:   [[ARG:%.+]] = load <4 x i32>, ptr [[AP_CUR]], align 16
+// ALL:   store <4 x i32> [[ARG]], ptr [[V]], align 16
 //
-// ALL:   [[VA1:%.+]] = bitcast i8** %va to i8*
-// ALL:   call void @llvm.va_end(i8* [[VA1]])
+// ALL:   call void @llvm.va_end(ptr %va)
 // ALL:   [[VECEXT:%.+]] = extractelement <4 x i32> {{.*}}, i32 0
 // ALL:   ret i32 [[VECEXT]]
 // ALL: }

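Two things carry the mips-varargs update. First, llvm.va_start and llvm.va_end
now take the ptr alloca directly, so the i8** to i8* bitcasts around them
disappear. Second, the O32 realignment sequence that several checks match
(ptrtoint, add, and, inttoptr) is the usual round-up idiom,
(addr + align - 1) & -align. Worked through with align 8: a va slot at address
20 gives 20 + 7 = 27 and 27 & -8 = 24, the next 8-byte boundary, while an
already-aligned address 24 gives 24 + 7 = 31 and 31 & -8 = 24, leaving it
unchanged.
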
diff --git a/clang/test/CodeGen/ms-intrinsics.c b/clang/test/CodeGen/ms-intrinsics.c
index 55e9dc0adf7db..3a9e04433a32f 100644
--- a/clang/test/CodeGen/ms-intrinsics.c
+++ b/clang/test/CodeGen/ms-intrinsics.c
@@ -1,13 +1,13 @@
-// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
+// RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
 // RUN:         -triple i686--windows -Oz -emit-llvm %s -o - \
 // RUN:         | FileCheck %s -check-prefixes CHECK,CHECK-I386,CHECK-INTEL
-// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
+// RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
 // RUN:         -triple thumbv7--windows -Oz -emit-llvm %s -o - \
 // RUN:         | FileCheck %s --check-prefixes CHECK,CHECK-ARM,CHECK-ARM-ARM64,CHECK-ARM-X64
-// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
+// RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
 // RUN:         -triple x86_64--windows -Oz -emit-llvm -target-feature +cx16 %s -o - \
 // RUN:         | FileCheck %s --check-prefixes CHECK,CHECK-X64,CHECK-ARM-X64,CHECK-INTEL,CHECK-64
-// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
+// RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
 // RUN:         -triple aarch64-windows -Oz -emit-llvm %s -o - \
 // RUN:         | FileCheck %s --check-prefixes CHECK-ARM-ARM64,CHECK-ARM-X64,CHECK-ARM64,CHECK-64
 
@@ -23,12 +23,12 @@ void test__stosb(unsigned char *Dest, unsigned char Data, size_t Count) {
 }
 
 // CHECK-I386: define{{.*}}void @test__stosb
-// CHECK-I386:   tail call void @llvm.memset.p0i8.i32(i8* align 1 %Dest, i8 %Data, i32 %Count, i1 true)
+// CHECK-I386:   tail call void @llvm.memset.p0.i32(ptr align 1 %Dest, i8 %Data, i32 %Count, i1 true)
 // CHECK-I386:   ret void
 // CHECK-I386: }
 
 // CHECK-X64: define{{.*}}void @test__stosb
-// CHECK-X64:   tail call void @llvm.memset.p0i8.i64(i8* align 1 %Dest, i8 %Data, i64 %Count, i1 true)
+// CHECK-X64:   tail call void @llvm.memset.p0.i64(ptr align 1 %Dest, i8 %Data, i64 %Count, i1 true)
 // CHECK-X64:   ret void
 // CHECK-X64: }
 
@@ -36,12 +36,12 @@ void test__movsb(unsigned char *Dest, unsigned char *Src, size_t Count) {
   return __movsb(Dest, Src, Count);
 }
 // CHECK-I386-LABEL: define{{.*}} void @test__movsb
-// CHECK-I386:   tail call { i8*, i8*, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movsb\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i32 %Count)
+// CHECK-I386:   tail call { ptr, ptr, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movsb\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i32 %Count)
 // CHECK-I386:   ret void
 // CHECK-I386: }
 
 // CHECK-X64-LABEL: define{{.*}} void @test__movsb
-// CHECK-X64:   call { i8*, i8*, i64 } asm sideeffect "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i64 %Count)
+// CHECK-X64:   call { ptr, ptr, i64 } asm sideeffect "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count)
 // CHECK-X64:   ret void
 // CHECK-X64: }
 
@@ -49,12 +49,12 @@ void test__stosw(unsigned short *Dest, unsigned short Data, size_t Count) {
   return __stosw(Dest, Data, Count);
 }
 // CHECK-I386-LABEL: define{{.*}} void @test__stosw
-// CHECK-I386:   call { i16*, i32 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i32 %Count)
+// CHECK-I386:   call { ptr, i32 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, ptr %Dest, i32 %Count)
 // CHECK-I386:   ret void
 // CHECK-I386: }
 
 // CHECK-X64-LABEL: define{{.*}} void @test__stosw
-// CHECK-X64:   call { i16*, i64 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i64 %Count)
+// CHECK-X64:   call { ptr, i64 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, ptr %Dest, i64 %Count)
 // CHECK-X64:   ret void
 // CHECK-X64: }
 
@@ -62,12 +62,12 @@ void test__movsw(unsigned short *Dest, unsigned short *Src, size_t Count) {
   return __movsw(Dest, Src, Count);
 }
 // CHECK-I386-LABEL: define{{.*}} void @test__movsw
-// CHECK-I386:   tail call { i16*, i16*, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movsw\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i32 %Count)
+// CHECK-I386:   tail call { ptr, ptr, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movsw\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i32 %Count)
 // CHECK-I386:   ret void
 // CHECK-I386: }
 
 // CHECK-X64-LABEL: define{{.*}} void @test__movsw
-// CHECK-X64:   call { i16*, i16*, i64 } asm sideeffect "rep movsw", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i64 %Count)
+// CHECK-X64:   call { ptr, ptr, i64 } asm sideeffect "rep movsw", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count)
 // CHECK-X64:   ret void
 // CHECK-X64: }
 
@@ -75,12 +75,12 @@ void test__stosd(unsigned long *Dest, unsigned long Data, size_t Count) {
   return __stosd(Dest, Data, Count);
 }
 // CHECK-I386-LABEL: define{{.*}} void @test__stosd
-// CHECK-I386:   call { i32*, i32 } asm sideeffect "rep stos$(l$|d$)", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i32 %Count)
+// CHECK-I386:   call { ptr, i32 } asm sideeffect "rep stos$(l$|d$)", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, ptr %Dest, i32 %Count)
 // CHECK-I386:   ret void
 // CHECK-I386: }
 
 // CHECK-X64-LABEL: define{{.*}} void @test__stosd
-// CHECK-X64:   call { i32*, i64 } asm sideeffect "rep stos$(l$|d$)", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i64 %Count)
+// CHECK-X64:   call { ptr, i64 } asm sideeffect "rep stos$(l$|d$)", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, ptr %Dest, i64 %Count)
 // CHECK-X64:   ret void
 // CHECK-X64: }
 
@@ -88,12 +88,12 @@ void test__movsd(unsigned long *Dest, unsigned long *Src, size_t Count) {
   return __movsd(Dest, Src, Count);
 }
 // CHECK-I386-LABEL: define{{.*}} void @test__movsd
-// CHECK-I386:   tail call { i32*, i32*, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movs$(l$|d$)\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i32 %Count)
+// CHECK-I386:   tail call { ptr, ptr, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movs$(l$|d$)\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i32 %Count)
 // CHECK-I386:   ret void
 // CHECK-I386: }
 
 // CHECK-X64-LABEL: define{{.*}} void @test__movsd
-// CHECK-X64:   call { i32*, i32*, i64 } asm sideeffect "rep movs$(l$|d$)", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i64 %Count)
+// CHECK-X64:   call { ptr, ptr, i64 } asm sideeffect "rep movs$(l$|d$)", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count)
 // CHECK-X64:   ret void
 // CHECK-X64: }
 
@@ -102,7 +102,7 @@ void test__stosq(unsigned __int64 *Dest, unsigned __int64 Data, size_t Count) {
   return __stosq(Dest, Data, Count);
 }
 // CHECK-X64-LABEL: define{{.*}} void @test__stosq
-// CHECK-X64:   call { i64*, i64 } asm sideeffect "rep stosq", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64 %Data, i64* %Dest, i64 %Count)
+// CHECK-X64:   call { ptr, i64 } asm sideeffect "rep stosq", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64 %Data, ptr %Dest, i64 %Count)
 // CHECK-X64:   ret void
 // CHECK-X64: }
 
@@ -110,7 +110,7 @@ void test__movsq(unsigned __int64 *Dest, unsigned __int64 *Src, size_t Count) {
   return __movsq(Dest, Src, Count);
 }
 // CHECK-X64-LABEL: define{{.*}} void @test__movsq
-// CHECK-X64:   call { i64*, i64*, i64 } asm sideeffect "rep movsq", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Dest, i64* %Src, i64 %Count)
+// CHECK-X64:   call { ptr, ptr, i64 } asm sideeffect "rep movsq", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count)
 // CHECK-X64:   ret void
 // CHECK-X64: }
 #endif
@@ -133,55 +133,55 @@ void test__int2c(void) {
 void *test_ReturnAddress(void) {
   return _ReturnAddress();
 }
-// CHECK-LABEL: define{{.*}}i8* @test_ReturnAddress()
-// CHECK: = tail call i8* @llvm.returnaddress(i32 0)
-// CHECK: ret i8*
+// CHECK-LABEL: define{{.*}}ptr @test_ReturnAddress()
+// CHECK: = tail call ptr @llvm.returnaddress(i32 0)
+// CHECK: ret ptr
 
 #if defined(__i386__) || defined(__x86_64__) || defined (__aarch64__)
 void *test_AddressOfReturnAddress(void) {
   return _AddressOfReturnAddress();
 }
-// CHECK-INTEL-LABEL: define dso_local i8* @test_AddressOfReturnAddress()
-// CHECK-INTEL: = tail call i8* @llvm.addressofreturnaddress.p0i8()
-// CHECK-INTEL: ret i8*
+// CHECK-INTEL-LABEL: define dso_local ptr @test_AddressOfReturnAddress()
+// CHECK-INTEL: = tail call ptr @llvm.addressofreturnaddress.p0()
+// CHECK-INTEL: ret ptr
 #endif
 
 unsigned char test_BitScanForward(unsigned long *Index, unsigned long Mask) {
   return _BitScanForward(++Index, Mask);
 }
-// CHECK: define{{.*}}i8 @test_BitScanForward(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
+// CHECK: define{{.*}}i8 @test_BitScanForward(ptr {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
 // CHECK:   [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i32 %Mask, 0
 // CHECK:   br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
 // CHECK:   [[END_LABEL]]:
 // CHECK:   [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
 // CHECK:   ret i8 [[RESULT]]
 // CHECK:   [[ISNOTZERO_LABEL]]:
-// CHECK:   [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds i32, i32* %Index, {{i64|i32}} 1
+// CHECK:   [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds i32, ptr %Index, {{i64|i32}} 1
 // CHECK:   [[INDEX:%[0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %Mask, i1 true)
-// CHECK:   store i32 [[INDEX]], i32* [[IDXGEP]], align 4
+// CHECK:   store i32 [[INDEX]], ptr [[IDXGEP]], align 4
 // CHECK:   br label %[[END_LABEL]]
 
 unsigned char test_BitScanReverse(unsigned long *Index, unsigned long Mask) {
   return _BitScanReverse(++Index, Mask);
 }
-// CHECK: define{{.*}}i8 @test_BitScanReverse(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
+// CHECK: define{{.*}}i8 @test_BitScanReverse(ptr {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
 // CHECK:   [[ISNOTZERO:%[0-9]+]] = icmp eq i32 %Mask, 0
 // CHECK:   br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
 // CHECK:   [[END_LABEL]]:
 // CHECK:   [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
 // CHECK:   ret i8 [[RESULT]]
 // CHECK:   [[ISNOTZERO_LABEL]]:
-// CHECK:   [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds i32, i32* %Index, {{i64|i32}} 1
+// CHECK:   [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds i32, ptr %Index, {{i64|i32}} 1
 // CHECK:   [[REVINDEX:%[0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %Mask, i1 true)
 // CHECK:   [[INDEX:%[0-9]+]] = xor i32 [[REVINDEX]], 31
-// CHECK:   store i32 [[INDEX]], i32* [[IDXGEP]], align 4
+// CHECK:   store i32 [[INDEX]], ptr [[IDXGEP]], align 4
 // CHECK:   br label %[[END_LABEL]]
 
 #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 unsigned char test_BitScanForward64(unsigned long *Index, unsigned __int64 Mask) {
   return _BitScanForward64(Index, Mask);
 }
-// CHECK-ARM-X64: define{{.*}}i8 @test_BitScanForward64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
+// CHECK-ARM-X64: define{{.*}}i8 @test_BitScanForward64(ptr {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
 // CHECK-ARM-X64:   [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i64 %Mask, 0
 // CHECK-ARM-X64:   br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
 // CHECK-ARM-X64:   [[END_LABEL]]:
@@ -190,13 +190,13 @@ unsigned char test_BitScanForward64(unsigned long *Index, unsigned __int64 Mask)
 // CHECK-ARM-X64:   [[ISNOTZERO_LABEL]]:
 // CHECK-ARM-X64:   [[INDEX:%[0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %Mask, i1 true)
 // CHECK-ARM-X64:   [[TRUNC_INDEX:%[0-9]+]] = trunc i64 [[INDEX]] to i32
-// CHECK-ARM-X64:   store i32 [[TRUNC_INDEX]], i32* %Index, align 4
+// CHECK-ARM-X64:   store i32 [[TRUNC_INDEX]], ptr %Index, align 4
 // CHECK-ARM-X64:   br label %[[END_LABEL]]
 
 unsigned char test_BitScanReverse64(unsigned long *Index, unsigned __int64 Mask) {
   return _BitScanReverse64(Index, Mask);
 }
-// CHECK-ARM-X64: define{{.*}}i8 @test_BitScanReverse64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
+// CHECK-ARM-X64: define{{.*}}i8 @test_BitScanReverse64(ptr {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
 // CHECK-ARM-X64:   [[ISNOTZERO:%[0-9]+]] = icmp eq i64 %Mask, 0
 // CHECK-ARM-X64:   br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
 // CHECK-ARM-X64:   [[END_LABEL]]:
@@ -206,7 +206,7 @@ unsigned char test_BitScanReverse64(unsigned long *Index, unsigned __int64 Mask)
 // CHECK-ARM-X64:   [[REVINDEX:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %Mask, i1 true)
 // CHECK-ARM-X64:   [[TRUNC_REVINDEX:%[0-9]+]] = trunc i64 [[REVINDEX]] to i32
 // CHECK-ARM-X64:   [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63
-// CHECK-ARM-X64:   store i32 [[INDEX]], i32* %Index, align 4
+// CHECK-ARM-X64:   store i32 [[INDEX]], ptr %Index, align 4
 // CHECK-ARM-X64:   br label %[[END_LABEL]]
 #endif
 
@@ -214,12 +214,11 @@ void *test_InterlockedExchangePointer(void * volatile *Target, void *Value) {
   return _InterlockedExchangePointer(Target, Value);
 }
 
-// CHECK: define{{.*}}i8* @test_InterlockedExchangePointer(i8** {{[a-z_ ]*}}%Target, i8* {{[a-z_ ]*}}%Value){{.*}}{
-// CHECK:   %[[TARGET:[0-9]+]] = bitcast i8** %Target to [[iPTR:i[0-9]+]]*
-// CHECK:   %[[VALUE:[0-9]+]] = ptrtoint i8* %Value to [[iPTR]]
-// CHECK:   %[[EXCHANGE:[0-9]+]] = atomicrmw xchg [[iPTR]]* %[[TARGET]], [[iPTR]] %[[VALUE]] seq_cst, align {{4|8}}
-// CHECK:   %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXCHANGE]] to i8*
-// CHECK:   ret i8* %[[RESULT]]
+// CHECK: define{{.*}}ptr @test_InterlockedExchangePointer(ptr {{[a-z_ ]*}}%Target, ptr {{[a-z_ ]*}}%Value){{.*}}{
+// CHECK:   %[[VALUE:[0-9]+]] = ptrtoint ptr %Value to [[iPTR:i[0-9]+]]
+// CHECK:   %[[EXCHANGE:[0-9]+]] = atomicrmw xchg ptr %Target, [[iPTR]] %[[VALUE]] seq_cst, align {{4|8}}
+// CHECK:   %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXCHANGE]] to ptr
+// CHECK:   ret ptr %[[RESULT]]
 // CHECK: }
 
 void *test_InterlockedCompareExchangePointer(void * volatile *Destination,
@@ -227,14 +226,13 @@ void *test_InterlockedCompareExchangePointer(void * volatile *Destination,
   return _InterlockedCompareExchangePointer(Destination, Exchange, Comparand);
 }
 
-// CHECK: define{{.*}}i8* @test_InterlockedCompareExchangePointer(i8** {{[a-z_ ]*}}%Destination, i8* {{[a-z_ ]*}}%Exchange, i8* {{[a-z_ ]*}}%Comparand){{.*}}{
-// CHECK:   %[[DEST:[0-9]+]] = bitcast i8** %Destination to [[iPTR]]*
-// CHECK:   %[[EXCHANGE:[0-9]+]] = ptrtoint i8* %Exchange to [[iPTR]]
-// CHECK:   %[[COMPARAND:[0-9]+]] = ptrtoint i8* %Comparand to [[iPTR]]
-// CHECK:   %[[XCHG:[0-9]+]] = cmpxchg volatile [[iPTR]]* %[[DEST:[0-9]+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] seq_cst seq_cst, align {{4|8}}
+// CHECK: define{{.*}}ptr @test_InterlockedCompareExchangePointer(ptr {{[a-z_ ]*}}%Destination, ptr {{[a-z_ ]*}}%Exchange, ptr {{[a-z_ ]*}}%Comparand){{.*}}{
+// CHECK:   %[[EXCHANGE:[0-9]+]] = ptrtoint ptr %Exchange to [[iPTR]]
+// CHECK:   %[[COMPARAND:[0-9]+]] = ptrtoint ptr %Comparand to [[iPTR]]
+// CHECK:   %[[XCHG:[0-9]+]] = cmpxchg volatile ptr %[[DEST:.+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] seq_cst seq_cst, align {{4|8}}
 // CHECK:   %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0
-// CHECK:   %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to i8*
-// CHECK:   ret i8* %[[RESULT:[0-9]+]]
+// CHECK:   %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to ptr
+// CHECK:   ret ptr %[[RESULT:[0-9]+]]
 // CHECK: }
 
 void *test_InterlockedCompareExchangePointer_nf(void * volatile *Destination,
@@ -242,165 +240,164 @@ void *test_InterlockedCompareExchangePointer_nf(void * volatile *Destination,
   return _InterlockedCompareExchangePointer_nf(Destination, Exchange, Comparand);
 }
 
-// CHECK: define{{.*}}i8* @test_InterlockedCompareExchangePointer_nf(i8** {{[a-z_ ]*}}%Destination, i8* {{[a-z_ ]*}}%Exchange, i8* {{[a-z_ ]*}}%Comparand){{.*}}{
-// CHECK:   %[[DEST:[0-9]+]] = bitcast i8** %Destination to [[iPTR]]*
-// CHECK:   %[[EXCHANGE:[0-9]+]] = ptrtoint i8* %Exchange to [[iPTR]]
-// CHECK:   %[[COMPARAND:[0-9]+]] = ptrtoint i8* %Comparand to [[iPTR]]
-// CHECK:   %[[XCHG:[0-9]+]] = cmpxchg volatile [[iPTR]]* %[[DEST:[0-9]+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] monotonic monotonic, align {{4|8}}
+// CHECK: define{{.*}}ptr @test_InterlockedCompareExchangePointer_nf(ptr {{[a-z_ ]*}}%Destination, ptr {{[a-z_ ]*}}%Exchange, ptr {{[a-z_ ]*}}%Comparand){{.*}}{
+// CHECK:   %[[EXCHANGE:[0-9]+]] = ptrtoint ptr %Exchange to [[iPTR]]
+// CHECK:   %[[COMPARAND:[0-9]+]] = ptrtoint ptr %Comparand to [[iPTR]]
+// CHECK:   %[[XCHG:[0-9]+]] = cmpxchg volatile ptr %[[DEST:.+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] monotonic monotonic, align {{4|8}}
 // CHECK:   %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0
-// CHECK:   %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to i8*
-// CHECK:   ret i8* %[[RESULT:[0-9]+]]
+// CHECK:   %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to ptr
+// CHECK:   ret ptr %[[RESULT:[0-9]+]]
 // CHECK: }
 
 char test_InterlockedExchange8(char volatile *value, char mask) {
   return _InterlockedExchange8(value, mask);
 }
-// CHECK: define{{.*}}i8 @test_InterlockedExchange8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask seq_cst, align 1
+// CHECK: define{{.*}}i8 @test_InterlockedExchange8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask seq_cst, align 1
 // CHECK:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 short test_InterlockedExchange16(short volatile *value, short mask) {
   return _InterlockedExchange16(value, mask);
 }
-// CHECK: define{{.*}}i16 @test_InterlockedExchange16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask seq_cst, align 2
+// CHECK: define{{.*}}i16 @test_InterlockedExchange16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask seq_cst, align 2
 // CHECK:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 long test_InterlockedExchange(long volatile *value, long mask) {
   return _InterlockedExchange(value, mask);
 }
-// CHECK: define{{.*}}i32 @test_InterlockedExchange(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask seq_cst, align 4
+// CHECK: define{{.*}}i32 @test_InterlockedExchange(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask seq_cst, align 4
 // CHECK:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 char test_InterlockedExchangeAdd8(char volatile *value, char mask) {
   return _InterlockedExchangeAdd8(value, mask);
 }
-// CHECK: define{{.*}}i8 @test_InterlockedExchangeAdd8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask seq_cst, align 1
+// CHECK: define{{.*}}i8 @test_InterlockedExchangeAdd8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask seq_cst, align 1
 // CHECK:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 short test_InterlockedExchangeAdd16(short volatile *value, short mask) {
   return _InterlockedExchangeAdd16(value, mask);
 }
-// CHECK: define{{.*}}i16 @test_InterlockedExchangeAdd16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask seq_cst, align 2
+// CHECK: define{{.*}}i16 @test_InterlockedExchangeAdd16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask seq_cst, align 2
 // CHECK:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 long test_InterlockedExchangeAdd(long volatile *value, long mask) {
   return _InterlockedExchangeAdd(value, mask);
 }
-// CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask seq_cst, align 4
+// CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask seq_cst, align 4
 // CHECK:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 char test_InterlockedExchangeSub8(char volatile *value, char mask) {
   return _InterlockedExchangeSub8(value, mask);
 }
-// CHECK: define{{.*}}i8 @test_InterlockedExchangeSub8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub i8* %value, i8 %mask seq_cst, align 1
+// CHECK: define{{.*}}i8 @test_InterlockedExchangeSub8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i8 %mask seq_cst, align 1
 // CHECK:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 short test_InterlockedExchangeSub16(short volatile *value, short mask) {
   return _InterlockedExchangeSub16(value, mask);
 }
-// CHECK: define{{.*}}i16 @test_InterlockedExchangeSub16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub i16* %value, i16 %mask seq_cst, align 2
+// CHECK: define{{.*}}i16 @test_InterlockedExchangeSub16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i16 %mask seq_cst, align 2
 // CHECK:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 long test_InterlockedExchangeSub(long volatile *value, long mask) {
   return _InterlockedExchangeSub(value, mask);
 }
-// CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub i32* %value, i32 %mask seq_cst, align 4
+// CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i32 %mask seq_cst, align 4
 // CHECK:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 char test_InterlockedOr8(char volatile *value, char mask) {
   return _InterlockedOr8(value, mask);
 }
-// CHECK: define{{.*}}i8 @test_InterlockedOr8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask seq_cst, align 1
+// CHECK: define{{.*}}i8 @test_InterlockedOr8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask seq_cst, align 1
 // CHECK:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 short test_InterlockedOr16(short volatile *value, short mask) {
   return _InterlockedOr16(value, mask);
 }
-// CHECK: define{{.*}}i16 @test_InterlockedOr16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask seq_cst, align 2
+// CHECK: define{{.*}}i16 @test_InterlockedOr16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask seq_cst, align 2
 // CHECK:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 long test_InterlockedOr(long volatile *value, long mask) {
   return _InterlockedOr(value, mask);
 }
-// CHECK: define{{.*}}i32 @test_InterlockedOr(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask seq_cst, align 4
+// CHECK: define{{.*}}i32 @test_InterlockedOr(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask seq_cst, align 4
 // CHECK:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 char test_InterlockedXor8(char volatile *value, char mask) {
   return _InterlockedXor8(value, mask);
 }
-// CHECK: define{{.*}}i8 @test_InterlockedXor8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask seq_cst, align 1
+// CHECK: define{{.*}}i8 @test_InterlockedXor8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask seq_cst, align 1
 // CHECK:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 short test_InterlockedXor16(short volatile *value, short mask) {
   return _InterlockedXor16(value, mask);
 }
-// CHECK: define{{.*}}i16 @test_InterlockedXor16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask seq_cst, align 2
+// CHECK: define{{.*}}i16 @test_InterlockedXor16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask seq_cst, align 2
 // CHECK:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 long test_InterlockedXor(long volatile *value, long mask) {
   return _InterlockedXor(value, mask);
 }
-// CHECK: define{{.*}}i32 @test_InterlockedXor(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask seq_cst, align 4
+// CHECK: define{{.*}}i32 @test_InterlockedXor(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask seq_cst, align 4
 // CHECK:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 char test_InterlockedAnd8(char volatile *value, char mask) {
   return _InterlockedAnd8(value, mask);
 }
-// CHECK: define{{.*}}i8 @test_InterlockedAnd8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask seq_cst, align 1
+// CHECK: define{{.*}}i8 @test_InterlockedAnd8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask seq_cst, align 1
 // CHECK:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 short test_InterlockedAnd16(short volatile *value, short mask) {
   return _InterlockedAnd16(value, mask);
 }
-// CHECK: define{{.*}}i16 @test_InterlockedAnd16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask seq_cst, align 2
+// CHECK: define{{.*}}i16 @test_InterlockedAnd16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask seq_cst, align 2
 // CHECK:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 long test_InterlockedAnd(long volatile *value, long mask) {
   return _InterlockedAnd(value, mask);
 }
-// CHECK: define{{.*}}i32 @test_InterlockedAnd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask seq_cst, align 4
+// CHECK: define{{.*}}i32 @test_InterlockedAnd(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask seq_cst, align 4
 // CHECK:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 char test_InterlockedCompareExchange8(char volatile *Destination, char Exchange, char Comperand) {
   return _InterlockedCompareExchange8(Destination, Exchange, Comperand);
 }
-// CHECK: define{{.*}}i8 @test_InterlockedCompareExchange8(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange seq_cst seq_cst, align 1
+// CHECK: define{{.*}}i8 @test_InterlockedCompareExchange8(ptr{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange seq_cst seq_cst, align 1
 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
 // CHECK: ret i8 [[RESULT]]
 // CHECK: }
@@ -408,8 +405,8 @@ char test_InterlockedCompareExchange8(char volatile *Destination, char Exchange,
 short test_InterlockedCompareExchange16(short volatile *Destination, short Exchange, short Comperand) {
   return _InterlockedCompareExchange16(Destination, Exchange, Comperand);
 }
-// CHECK: define{{.*}}i16 @test_InterlockedCompareExchange16(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange seq_cst seq_cst, align 2
+// CHECK: define{{.*}}i16 @test_InterlockedCompareExchange16(ptr{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange seq_cst seq_cst, align 2
 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
 // CHECK: ret i16 [[RESULT]]
 // CHECK: }
@@ -417,8 +414,8 @@ short test_InterlockedCompareExchange16(short volatile *Destination, short Excha
 long test_InterlockedCompareExchange(long volatile *Destination, long Exchange, long Comperand) {
   return _InterlockedCompareExchange(Destination, Exchange, Comperand);
 }
-// CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst, align 4
+// CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst, align 4
 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
 // CHECK: ret i32 [[RESULT]]
 // CHECK: }
@@ -426,8 +423,8 @@ long test_InterlockedCompareExchange(long volatile *Destination, long Exchange,
 __int64 test_InterlockedCompareExchange64(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
   return _InterlockedCompareExchange64(Destination, Exchange, Comperand);
 }
-// CHECK: define{{.*}}i64 @test_InterlockedCompareExchange64(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange seq_cst seq_cst, align 8
+// CHECK: define{{.*}}i64 @test_InterlockedCompareExchange64(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange seq_cst seq_cst, align 8
 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
 // CHECK: ret i64 [[RESULT]]
 // CHECK: }
@@ -439,21 +436,19 @@ unsigned char test_InterlockedCompareExchange128(
   return _InterlockedCompareExchange128(++Destination, ++ExchangeHigh,
                                         ++ExchangeLow, ++ComparandResult);
 }
-// CHECK-64: define{{.*}}i8 @test_InterlockedCompareExchange128(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%ExchangeHigh, i64{{[a-z_ ]*}}%ExchangeLow, i64*{{[a-z_ ]*}}%ComparandResult){{.*}}{
-// CHECK-64: %incdec.ptr = getelementptr inbounds i64, i64* %Destination, i64 1
+// CHECK-64: define{{.*}}i8 @test_InterlockedCompareExchange128(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%ExchangeHigh, i64{{[a-z_ ]*}}%ExchangeLow, ptr{{[a-z_ ]*}}%ComparandResult){{.*}}{
+// CHECK-64: %incdec.ptr = getelementptr inbounds i64, ptr %Destination, i64 1
 // CHECK-64: %inc = add nsw i64 %ExchangeHigh, 1
 // CHECK-64: %inc1 = add nsw i64 %ExchangeLow, 1
-// CHECK-64: %incdec.ptr2 = getelementptr inbounds i64, i64* %ComparandResult, i64 1
-// CHECK-64: [[DST:%[0-9]+]] = bitcast i64* %incdec.ptr to i128*
-// CHECK-64: [[CNR:%[0-9]+]] = bitcast i64* %incdec.ptr2 to i128*
+// CHECK-64: %incdec.ptr2 = getelementptr inbounds i64, ptr %ComparandResult, i64 1
 // CHECK-64: [[EH:%[0-9]+]] = zext i64 %inc to i128
 // CHECK-64: [[EL:%[0-9]+]] = zext i64 %inc1 to i128
 // CHECK-64: [[EHS:%[0-9]+]] = shl nuw i128 [[EH]], 64
 // CHECK-64: [[EXP:%[0-9]+]] = or i128 [[EHS]], [[EL]]
-// CHECK-64: [[ORG:%[0-9]+]] = load i128, i128* [[CNR]], align 16
-// CHECK-64: [[RES:%[0-9]+]] = cmpxchg volatile i128* [[DST]], i128 [[ORG]], i128 [[EXP]] seq_cst seq_cst, align 16
+// CHECK-64: [[ORG:%[0-9]+]] = load i128, ptr %incdec.ptr2, align 16
+// CHECK-64: [[RES:%[0-9]+]] = cmpxchg volatile ptr %incdec.ptr, i128 [[ORG]], i128 [[EXP]] seq_cst seq_cst, align 16
 // CHECK-64: [[OLD:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 0
-// CHECK-64: store i128 [[OLD]], i128* [[CNR]], align 16
+// CHECK-64: store i128 [[OLD]], ptr %incdec.ptr2, align 16
 // CHECK-64: [[SUC1:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 1
 // CHECK-64: [[SUC8:%[0-9]+]] = zext i1 [[SUC1]] to i8
 // CHECK-64: ret i8 [[SUC8]]
@@ -480,19 +475,19 @@ unsigned char test_InterlockedCompareExchange128_rel(
                                             ExchangeLow, ComparandResult);
 }
 // CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_acq({{.*}})
-// CHECK-ARM64: cmpxchg volatile i128* %{{.*}}, i128 %{{.*}}, i128 %{{.*}} acquire acquire, align 16
+// CHECK-ARM64: cmpxchg volatile ptr %{{.*}}, i128 %{{.*}}, i128 %{{.*}} acquire acquire, align 16
 // CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_nf({{.*}})
-// CHECK-ARM64: cmpxchg volatile i128* %{{.*}}, i128 %{{.*}}, i128 %{{.*}} monotonic monotonic, align 16
+// CHECK-ARM64: cmpxchg volatile ptr %{{.*}}, i128 %{{.*}}, i128 %{{.*}} monotonic monotonic, align 16
 // CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_rel({{.*}})
-// CHECK-ARM64: cmpxchg volatile i128* %{{.*}}, i128 %{{.*}}, i128 %{{.*}} release monotonic, align 16
+// CHECK-ARM64: cmpxchg volatile ptr %{{.*}}, i128 %{{.*}}, i128 %{{.*}} release monotonic, align 16
 #endif
 
 short test_InterlockedIncrement16(short volatile *Addend) {
   return _InterlockedIncrement16(++Addend);
 }
-// CHECK: define{{.*}}i16 @test_InterlockedIncrement16(i16*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK: %incdec.ptr = getelementptr inbounds i16, i16* %Addend, {{i64|i32}} 1
-// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i16* %incdec.ptr, i16 1 seq_cst, align 2
+// CHECK: define{{.*}}i16 @test_InterlockedIncrement16(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: %incdec.ptr = getelementptr inbounds i16, ptr %Addend, {{i64|i32}} 1
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw add ptr %incdec.ptr, i16 1 seq_cst, align 2
 // CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
 // CHECK: ret i16 [[RESULT]]
 // CHECK: }
@@ -500,9 +495,9 @@ short test_InterlockedIncrement16(short volatile *Addend) {
 long test_InterlockedIncrement(long volatile *Addend) {
   return _InterlockedIncrement(++Addend);
 }
-// CHECK: define{{.*}}i32 @test_InterlockedIncrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK: %incdec.ptr = getelementptr inbounds i32, i32* %Addend, {{i64|i32}} 1
-// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i32* %incdec.ptr, i32 1 seq_cst, align 4
+// CHECK: define{{.*}}i32 @test_InterlockedIncrement(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: %incdec.ptr = getelementptr inbounds i32, ptr %Addend, {{i64|i32}} 1
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw add ptr %incdec.ptr, i32 1 seq_cst, align 4
 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
 // CHECK: ret i32 [[RESULT]]
 // CHECK: }
@@ -510,8 +505,8 @@ long test_InterlockedIncrement(long volatile *Addend) {
 short test_InterlockedDecrement16(short volatile *Addend) {
   return _InterlockedDecrement16(Addend);
 }
-// CHECK: define{{.*}}i16 @test_InterlockedDecrement16(i16*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 seq_cst, align 2
+// CHECK: define{{.*}}i16 @test_InterlockedDecrement16(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 seq_cst, align 2
 // CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
 // CHECK: ret i16 [[RESULT]]
 // CHECK: }
@@ -519,8 +514,8 @@ short test_InterlockedDecrement16(short volatile *Addend) {
 long test_InterlockedDecrement(long volatile *Addend) {
   return _InterlockedDecrement(Addend);
 }
-// CHECK: define{{.*}}i32 @test_InterlockedDecrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 seq_cst, align 4
+// CHECK: define{{.*}}i32 @test_InterlockedDecrement(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 seq_cst, align 4
 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
 // CHECK: ret i32 [[RESULT]]
 // CHECK: }
@@ -530,84 +525,84 @@ short test_iso_volatile_load16(short volatile *p) { return __iso_volatile_load16
 int test_iso_volatile_load32(int volatile *p) { return __iso_volatile_load32(p); }
 __int64 test_iso_volatile_load64(__int64 volatile *p) { return __iso_volatile_load64(p); }
 
-// CHECK: define{{.*}}i8 @test_iso_volatile_load8(i8*{{[a-z_ ]*}}%p)
-// CHECK: = load volatile i8, i8* %p
-// CHECK: define{{.*}}i16 @test_iso_volatile_load16(i16*{{[a-z_ ]*}}%p)
-// CHECK: = load volatile i16, i16* %p
-// CHECK: define{{.*}}i32 @test_iso_volatile_load32(i32*{{[a-z_ ]*}}%p)
-// CHECK: = load volatile i32, i32* %p
-// CHECK: define{{.*}}i64 @test_iso_volatile_load64(i64*{{[a-z_ ]*}}%p)
-// CHECK: = load volatile i64, i64* %p
+// CHECK: define{{.*}}i8 @test_iso_volatile_load8(ptr{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i8, ptr %p
+// CHECK: define{{.*}}i16 @test_iso_volatile_load16(ptr{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i16, ptr %p
+// CHECK: define{{.*}}i32 @test_iso_volatile_load32(ptr{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i32, ptr %p
+// CHECK: define{{.*}}i64 @test_iso_volatile_load64(ptr{{[a-z_ ]*}}%p)
+// CHECK: = load volatile i64, ptr %p
 
 void test_iso_volatile_store8(char volatile *p, char v) { __iso_volatile_store8(p, v); }
 void test_iso_volatile_store16(short volatile *p, short v) { __iso_volatile_store16(p, v); }
 void test_iso_volatile_store32(int volatile *p, int v) { __iso_volatile_store32(p, v); }
 void test_iso_volatile_store64(__int64 volatile *p, __int64 v) { __iso_volatile_store64(p, v); }
 
-// CHECK: define{{.*}}void @test_iso_volatile_store8(i8*{{[a-z_ ]*}}%p, i8 {{[a-z_ ]*}}%v)
-// CHECK: store volatile i8 %v, i8* %p
-// CHECK: define{{.*}}void @test_iso_volatile_store16(i16*{{[a-z_ ]*}}%p, i16 {{[a-z_ ]*}}%v)
-// CHECK: store volatile i16 %v, i16* %p
-// CHECK: define{{.*}}void @test_iso_volatile_store32(i32*{{[a-z_ ]*}}%p, i32 {{[a-z_ ]*}}%v)
-// CHECK: store volatile i32 %v, i32* %p
-// CHECK: define{{.*}}void @test_iso_volatile_store64(i64*{{[a-z_ ]*}}%p, i64 {{[a-z_ ]*}}%v)
-// CHECK: store volatile i64 %v, i64* %p
+// CHECK: define{{.*}}void @test_iso_volatile_store8(ptr{{[a-z_ ]*}}%p, i8 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i8 %v, ptr %p
+// CHECK: define{{.*}}void @test_iso_volatile_store16(ptr{{[a-z_ ]*}}%p, i16 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i16 %v, ptr %p
+// CHECK: define{{.*}}void @test_iso_volatile_store32(ptr{{[a-z_ ]*}}%p, i32 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i32 %v, ptr %p
+// CHECK: define{{.*}}void @test_iso_volatile_store64(ptr{{[a-z_ ]*}}%p, i64 {{[a-z_ ]*}}%v)
+// CHECK: store volatile i64 %v, ptr %p
 
 
 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 __int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
   return _InterlockedExchange64(value, mask);
 }
-// CHECK: define{{.*}}i64 @test_InterlockedExchange64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask seq_cst, align 8
+// CHECK: define{{.*}}i64 @test_InterlockedExchange64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask seq_cst, align 8
 // CHECK:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 __int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) {
   return _InterlockedExchangeAdd64(value, mask);
 }
-// CHECK: define{{.*}}i64 @test_InterlockedExchangeAdd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask seq_cst, align 8
+// CHECK: define{{.*}}i64 @test_InterlockedExchangeAdd64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask seq_cst, align 8
 // CHECK:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 __int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) {
   return _InterlockedExchangeSub64(value, mask);
 }
-// CHECK: define{{.*}}i64 @test_InterlockedExchangeSub64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub i64* %value, i64 %mask seq_cst, align 8
+// CHECK: define{{.*}}i64 @test_InterlockedExchangeSub64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i64 %mask seq_cst, align 8
 // CHECK:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 __int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) {
   return _InterlockedOr64(value, mask);
 }
-// CHECK: define{{.*}}i64 @test_InterlockedOr64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask seq_cst, align 8
+// CHECK: define{{.*}}i64 @test_InterlockedOr64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask seq_cst, align 8
 // CHECK:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 __int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) {
   return _InterlockedXor64(value, mask);
 }
-// CHECK: define{{.*}}i64 @test_InterlockedXor64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask seq_cst, align 8
+// CHECK: define{{.*}}i64 @test_InterlockedXor64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask seq_cst, align 8
 // CHECK:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 __int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) {
   return _InterlockedAnd64(value, mask);
 }
-// CHECK: define{{.*}}i64 @test_InterlockedAnd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask seq_cst, align 8
+// CHECK: define{{.*}}i64 @test_InterlockedAnd64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask seq_cst, align 8
 // CHECK:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK: }
 
 __int64 test_InterlockedIncrement64(__int64 volatile *Addend) {
   return _InterlockedIncrement64(Addend);
 }
-// CHECK: define{{.*}}i64 @test_InterlockedIncrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 seq_cst, align 8
+// CHECK: define{{.*}}i64 @test_InterlockedIncrement64(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 seq_cst, align 8
 // CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
 // CHECK: ret i64 [[RESULT]]
 // CHECK: }
@@ -615,8 +610,8 @@ __int64 test_InterlockedIncrement64(__int64 volatile *Addend) {
 __int64 test_InterlockedDecrement64(__int64 volatile *Addend) {
   return _InterlockedDecrement64(Addend);
 }
-// CHECK: define{{.*}}i64 @test_InterlockedDecrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 seq_cst, align 8
+// CHECK: define{{.*}}i64 @test_InterlockedDecrement64(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 seq_cst, align 8
 // CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
 // CHECK: ret i64 [[RESULT]]
 // CHECK: }
@@ -625,49 +620,49 @@ __int64 test_InterlockedDecrement64(__int64 volatile *Addend) {
 
 #if defined(__i386__) || defined(__x86_64__)
 long test_InterlockedExchange_HLEAcquire(long volatile *Target, long Value) {
-// CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLEAcquire(i32*{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value)
-// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %Target, i32 %Value, i32* elementtype(i32) %Target)
+// CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLEAcquire(ptr{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value)
+// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Target, i32 %Value, ptr elementtype(i32) %Target)
   return _InterlockedExchange_HLEAcquire(Target, Value);
 }
 long test_InterlockedExchange_HLERelease(long volatile *Target, long Value) {
-// CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLERelease(i32*{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value)
-// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %Target, i32 %Value, i32* elementtype(i32) %Target)
+// CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLERelease(ptr{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value)
+// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Target, i32 %Value, ptr elementtype(i32) %Target)
   return _InterlockedExchange_HLERelease(Target, Value);
 }
 long test_InterlockedCompareExchange_HLEAcquire(long volatile *Destination,
                                                 long Exchange, long Comparand) {
-// CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLEAcquire(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand)
-// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %Destination, i32 %Exchange, i32 %Comparand, i32* elementtype(i32) %Destination)
+// CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLEAcquire(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand)
+// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Destination, i32 %Exchange, i32 %Comparand, ptr elementtype(i32) %Destination)
   return _InterlockedCompareExchange_HLEAcquire(Destination, Exchange, Comparand);
 }
 long test_InterlockedCompareExchange_HLERelease(long volatile *Destination,
                                             long Exchange, long Comparand) {
-// CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLERelease(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand)
-// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %Destination, i32 %Exchange, i32 %Comparand, i32* elementtype(i32) %Destination)
+// CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLERelease(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand)
+// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Destination, i32 %Exchange, i32 %Comparand, ptr elementtype(i32) %Destination)
   return _InterlockedCompareExchange_HLERelease(Destination, Exchange, Comparand);
 }
 #endif
 #if defined(__x86_64__)
 __int64 test_InterlockedExchange64_HLEAcquire(__int64 volatile *Target, __int64 Value) {
-// CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLEAcquire(i64*{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value)
-// CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %Target, i64 %Value, i64* elementtype(i64) %Target)
+// CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLEAcquire(ptr{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value)
+// CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Target, i64 %Value, ptr elementtype(i64) %Target)
   return _InterlockedExchange64_HLEAcquire(Target, Value);
 }
 __int64 test_InterlockedExchange64_HLERelease(__int64 volatile *Target, __int64 Value) {
-// CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLERelease(i64*{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value)
-// CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %Target, i64 %Value, i64* elementtype(i64) %Target)
+// CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLERelease(ptr{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value)
+// CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Target, i64 %Value, ptr elementtype(i64) %Target)
   return _InterlockedExchange64_HLERelease(Target, Value);
 }
 __int64 test_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *Destination,
                                                      __int64 Exchange, __int64 Comparand) {
-// CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLEAcquire(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand)
-// CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %Destination, i64 %Exchange, i64 %Comparand, i64* elementtype(i64) %Destination)
+// CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLEAcquire(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand)
+// CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Destination, i64 %Exchange, i64 %Comparand, ptr elementtype(i64) %Destination)
   return _InterlockedCompareExchange64_HLEAcquire(Destination, Exchange, Comparand);
 }
 __int64 test_InterlockedCompareExchange64_HLERelease(__int64 volatile *Destination,
                                                      __int64 Exchange, __int64 Comparand) {
-// CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLERelease(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand)
-// CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %Destination, i64 %Exchange, i64 %Comparand, i64* elementtype(i64) %Destination)
+// CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLERelease(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand)
+// CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Destination, i64 %Exchange, i64 %Comparand, ptr elementtype(i64) %Destination)
   return _InterlockedCompareExchange64_HLERelease(Destination, Exchange, Comparand);
 }
 #endif
@@ -676,178 +671,178 @@ __int64 test_InterlockedCompareExchange64_HLERelease(__int64 volatile *Destinati
 char test_InterlockedExchangeAdd8_acq(char volatile *value, char mask) {
   return _InterlockedExchangeAdd8_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask acquire, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_acq(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask acquire, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 char test_InterlockedExchangeAdd8_rel(char volatile *value, char mask) {
   return _InterlockedExchangeAdd8_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask release, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_rel(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask release, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 char test_InterlockedExchangeAdd8_nf(char volatile *value, char mask) {
   return _InterlockedExchangeAdd8_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask monotonic, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_nf(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask monotonic, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 short test_InterlockedExchangeAdd16_acq(short volatile *value, short mask) {
   return _InterlockedExchangeAdd16_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask acquire, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_acq(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask acquire, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 short test_InterlockedExchangeAdd16_rel(short volatile *value, short mask) {
   return _InterlockedExchangeAdd16_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask release, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_rel(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask release, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 short test_InterlockedExchangeAdd16_nf(short volatile *value, short mask) {
   return _InterlockedExchangeAdd16_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask monotonic, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_nf(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask monotonic, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 long test_InterlockedExchangeAdd_acq(long volatile *value, long mask) {
   return _InterlockedExchangeAdd_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask acquire, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask acquire, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 long test_InterlockedExchangeAdd_rel(long volatile *value, long mask) {
   return _InterlockedExchangeAdd_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask release, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask release, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 long test_InterlockedExchangeAdd_nf(long volatile *value, long mask) {
   return _InterlockedExchangeAdd_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask monotonic, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask monotonic, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 __int64 test_InterlockedExchangeAdd64_acq(__int64 volatile *value, __int64 mask) {
   return _InterlockedExchangeAdd64_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask acquire, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_acq(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask acquire, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 __int64 test_InterlockedExchangeAdd64_rel(__int64 volatile *value, __int64 mask) {
   return _InterlockedExchangeAdd64_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask release, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_rel(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask release, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 __int64 test_InterlockedExchangeAdd64_nf(__int64 volatile *value, __int64 mask) {
   return _InterlockedExchangeAdd64_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask monotonic, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_nf(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask monotonic, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 char test_InterlockedExchange8_acq(char volatile *value, char mask) {
   return _InterlockedExchange8_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask acquire, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_acq(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask acquire, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 char test_InterlockedExchange8_rel(char volatile *value, char mask) {
   return _InterlockedExchange8_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask release, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_rel(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask release, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 char test_InterlockedExchange8_nf(char volatile *value, char mask) {
   return _InterlockedExchange8_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask monotonic, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_nf(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask monotonic, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 short test_InterlockedExchange16_acq(short volatile *value, short mask) {
   return _InterlockedExchange16_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask acquire, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_acq(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask acquire, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 short test_InterlockedExchange16_rel(short volatile *value, short mask) {
   return _InterlockedExchange16_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask release, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_rel(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask release, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 short test_InterlockedExchange16_nf(short volatile *value, short mask) {
   return _InterlockedExchange16_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask monotonic, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_nf(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask monotonic, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 long test_InterlockedExchange_acq(long volatile *value, long mask) {
   return _InterlockedExchange_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask acquire, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask acquire, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 long test_InterlockedExchange_rel(long volatile *value, long mask) {
   return _InterlockedExchange_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask release, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask release, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 long test_InterlockedExchange_nf(long volatile *value, long mask) {
   return _InterlockedExchange_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask monotonic, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask monotonic, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 __int64 test_InterlockedExchange64_acq(__int64 volatile *value, __int64 mask) {
   return _InterlockedExchange64_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask acquire, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_acq(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask acquire, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 __int64 test_InterlockedExchange64_rel(__int64 volatile *value, __int64 mask) {
   return _InterlockedExchange64_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask release, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_rel(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask release, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 __int64 test_InterlockedExchange64_nf(__int64 volatile *value, __int64 mask) {
   return _InterlockedExchange64_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask monotonic, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_nf(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask monotonic, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 char test_InterlockedCompareExchange8_acq(char volatile *Destination, char Exchange, char Comperand) {
   return _InterlockedCompareExchange8_acq(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_acq(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange acquire acquire, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_acq(ptr{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange acquire acquire, align 1
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i8 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -855,8 +850,8 @@ char test_InterlockedCompareExchange8_acq(char volatile *Destination, char Excha
 char test_InterlockedCompareExchange8_rel(char volatile *Destination, char Exchange, char Comperand) {
   return _InterlockedCompareExchange8_rel(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_rel(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange release monotonic, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_rel(ptr{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange release monotonic, align 1
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i8 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -864,8 +859,8 @@ char test_InterlockedCompareExchange8_rel(char volatile *Destination, char Excha
 char test_InterlockedCompareExchange8_nf(char volatile *Destination, char Exchange, char Comperand) {
   return _InterlockedCompareExchange8_nf(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_nf(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange monotonic monotonic, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_nf(ptr{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange monotonic monotonic, align 1
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i8 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -873,8 +868,8 @@ char test_InterlockedCompareExchange8_nf(char volatile *Destination, char Exchan
 short test_InterlockedCompareExchange16_acq(short volatile *Destination, short Exchange, short Comperand) {
   return _InterlockedCompareExchange16_acq(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_acq(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange acquire acquire, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_acq(ptr{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange acquire acquire, align 2
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -882,8 +877,8 @@ short test_InterlockedCompareExchange16_acq(short volatile *Destination, short E
 short test_InterlockedCompareExchange16_rel(short volatile *Destination, short Exchange, short Comperand) {
   return _InterlockedCompareExchange16_rel(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_rel(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange release monotonic, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_rel(ptr{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange release monotonic, align 2
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -891,8 +886,8 @@ short test_InterlockedCompareExchange16_rel(short volatile *Destination, short E
 short test_InterlockedCompareExchange16_nf(short volatile *Destination, short Exchange, short Comperand) {
   return _InterlockedCompareExchange16_nf(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_nf(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange monotonic monotonic, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_nf(ptr{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange monotonic monotonic, align 2
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -900,8 +895,8 @@ short test_InterlockedCompareExchange16_nf(short volatile *Destination, short Ex
 long test_InterlockedCompareExchange_acq(long volatile *Destination, long Exchange, long Comperand) {
   return _InterlockedCompareExchange_acq(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_acq(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange acquire acquire, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_acq(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange acquire acquire, align 4
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -909,8 +904,8 @@ long test_InterlockedCompareExchange_acq(long volatile *Destination, long Exchan
 long test_InterlockedCompareExchange_rel(long volatile *Destination, long Exchange, long Comperand) {
   return _InterlockedCompareExchange_rel(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_rel(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange release monotonic, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_rel(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange release monotonic, align 4
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -918,8 +913,8 @@ long test_InterlockedCompareExchange_rel(long volatile *Destination, long Exchan
 long test_InterlockedCompareExchange_nf(long volatile *Destination, long Exchange, long Comperand) {
   return _InterlockedCompareExchange_nf(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_nf(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange monotonic monotonic, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_nf(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange monotonic monotonic, align 4
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -927,8 +922,8 @@ long test_InterlockedCompareExchange_nf(long volatile *Destination, long Exchang
 __int64 test_InterlockedCompareExchange64_acq(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
   return _InterlockedCompareExchange64_acq(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_acq(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange acquire acquire, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_acq(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange acquire acquire, align 8
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -936,8 +931,8 @@ __int64 test_InterlockedCompareExchange64_acq(__int64 volatile *Destination, __i
 __int64 test_InterlockedCompareExchange64_rel(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
   return _InterlockedCompareExchange64_rel(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_rel(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange release monotonic, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_rel(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange release monotonic, align 8
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -945,8 +940,8 @@ __int64 test_InterlockedCompareExchange64_rel(__int64 volatile *Destination, __i
 __int64 test_InterlockedCompareExchange64_nf(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
   return _InterlockedCompareExchange64_nf(Destination, Exchange, Comperand);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_nf(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange monotonic monotonic, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_nf(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange monotonic monotonic, align 8
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -954,296 +949,296 @@ __int64 test_InterlockedCompareExchange64_nf(__int64 volatile *Destination, __in
 char test_InterlockedOr8_acq(char volatile *value, char mask) {
   return _InterlockedOr8_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask acquire, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_acq(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask acquire, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 char test_InterlockedOr8_rel(char volatile *value, char mask) {
   return _InterlockedOr8_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask release, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_rel(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask release, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 char test_InterlockedOr8_nf(char volatile *value, char mask) {
   return _InterlockedOr8_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask monotonic, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_nf(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask monotonic, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 short test_InterlockedOr16_acq(short volatile *value, short mask) {
   return _InterlockedOr16_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask acquire, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_acq(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask acquire, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 short test_InterlockedOr16_rel(short volatile *value, short mask) {
   return _InterlockedOr16_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask release, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_rel(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask release, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 short test_InterlockedOr16_nf(short volatile *value, short mask) {
   return _InterlockedOr16_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask monotonic, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_nf(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask monotonic, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 long test_InterlockedOr_acq(long volatile *value, long mask) {
   return _InterlockedOr_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask acquire, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask acquire, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 long test_InterlockedOr_rel(long volatile *value, long mask) {
   return _InterlockedOr_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask release, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask release, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 long test_InterlockedOr_nf(long volatile *value, long mask) {
   return _InterlockedOr_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask monotonic, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask monotonic, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 __int64 test_InterlockedOr64_acq(__int64 volatile *value, __int64 mask) {
   return _InterlockedOr64_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask acquire, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_acq(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask acquire, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 __int64 test_InterlockedOr64_rel(__int64 volatile *value, __int64 mask) {
   return _InterlockedOr64_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask release, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_rel(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask release, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 __int64 test_InterlockedOr64_nf(__int64 volatile *value, __int64 mask) {
   return _InterlockedOr64_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask monotonic, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_nf(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask monotonic, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 char test_InterlockedXor8_acq(char volatile *value, char mask) {
   return _InterlockedXor8_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask acquire, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_acq(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask acquire, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 char test_InterlockedXor8_rel(char volatile *value, char mask) {
   return _InterlockedXor8_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask release, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_rel(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask release, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 char test_InterlockedXor8_nf(char volatile *value, char mask) {
   return _InterlockedXor8_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask monotonic, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_nf(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask monotonic, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 short test_InterlockedXor16_acq(short volatile *value, short mask) {
   return _InterlockedXor16_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask acquire, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_acq(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask acquire, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 short test_InterlockedXor16_rel(short volatile *value, short mask) {
   return _InterlockedXor16_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask release, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_rel(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask release, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 short test_InterlockedXor16_nf(short volatile *value, short mask) {
   return _InterlockedXor16_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask monotonic, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_nf(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask monotonic, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 long test_InterlockedXor_acq(long volatile *value, long mask) {
   return _InterlockedXor_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask acquire, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask acquire, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 long test_InterlockedXor_rel(long volatile *value, long mask) {
   return _InterlockedXor_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask release, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask release, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 long test_InterlockedXor_nf(long volatile *value, long mask) {
   return _InterlockedXor_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask monotonic, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask monotonic, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 __int64 test_InterlockedXor64_acq(__int64 volatile *value, __int64 mask) {
   return _InterlockedXor64_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask acquire, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_acq(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask acquire, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 __int64 test_InterlockedXor64_rel(__int64 volatile *value, __int64 mask) {
   return _InterlockedXor64_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask release, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_rel(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask release, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 __int64 test_InterlockedXor64_nf(__int64 volatile *value, __int64 mask) {
   return _InterlockedXor64_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask monotonic, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_nf(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask monotonic, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 char test_InterlockedAnd8_acq(char volatile *value, char mask) {
   return _InterlockedAnd8_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask acquire, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_acq(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask acquire, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 char test_InterlockedAnd8_rel(char volatile *value, char mask) {
   return _InterlockedAnd8_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask release, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_rel(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask release, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 char test_InterlockedAnd8_nf(char volatile *value, char mask) {
   return _InterlockedAnd8_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask monotonic, align 1
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_nf(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask monotonic, align 1
 // CHECK-ARM-ARM64:   ret i8 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 short test_InterlockedAnd16_acq(short volatile *value, short mask) {
   return _InterlockedAnd16_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask acquire, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_acq(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask acquire, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 short test_InterlockedAnd16_rel(short volatile *value, short mask) {
   return _InterlockedAnd16_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask release, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_rel(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask release, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 short test_InterlockedAnd16_nf(short volatile *value, short mask) {
   return _InterlockedAnd16_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask monotonic, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_nf(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask monotonic, align 2
 // CHECK-ARM-ARM64:   ret i16 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 long test_InterlockedAnd_acq(long volatile *value, long mask) {
   return _InterlockedAnd_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask acquire, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask acquire, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 long test_InterlockedAnd_rel(long volatile *value, long mask) {
   return _InterlockedAnd_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask release, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask release, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 long test_InterlockedAnd_nf(long volatile *value, long mask) {
   return _InterlockedAnd_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask monotonic, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask monotonic, align 4
 // CHECK-ARM-ARM64:   ret i32 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 __int64 test_InterlockedAnd64_acq(__int64 volatile *value, __int64 mask) {
   return _InterlockedAnd64_acq(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask acquire, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_acq(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask acquire, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 __int64 test_InterlockedAnd64_rel(__int64 volatile *value, __int64 mask) {
   return _InterlockedAnd64_rel(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask release, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_rel(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask release, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 __int64 test_InterlockedAnd64_nf(__int64 volatile *value, __int64 mask) {
   return _InterlockedAnd64_nf(value, mask);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask monotonic, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_nf(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
+// CHECK-ARM-ARM64:   [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask monotonic, align 8
 // CHECK-ARM-ARM64:   ret i64 [[RESULT:%[0-9]+]]
 // CHECK-ARM-ARM64: }
 
 short test_InterlockedIncrement16_acq(short volatile *Addend) {
   return _InterlockedIncrement16_acq(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_acq(i16*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 acquire, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i16 1 acquire, align 2
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1251,8 +1246,8 @@ short test_InterlockedIncrement16_acq(short volatile *Addend) {
 short test_InterlockedIncrement16_rel(short volatile *Addend) {
   return _InterlockedIncrement16_rel(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_rel(i16*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 release, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i16 1 release, align 2
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1260,8 +1255,8 @@ short test_InterlockedIncrement16_rel(short volatile *Addend) {
 short test_InterlockedIncrement16_nf(short volatile *Addend) {
   return _InterlockedIncrement16_nf(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_nf(i16*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 monotonic, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i16 1 monotonic, align 2
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1269,8 +1264,8 @@ short test_InterlockedIncrement16_nf(short volatile *Addend) {
 long test_InterlockedIncrement_acq(long volatile *Addend) {
   return _InterlockedIncrement_acq(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 acquire, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 acquire, align 4
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1278,8 +1273,8 @@ long test_InterlockedIncrement_acq(long volatile *Addend) {
 long test_InterlockedIncrement_rel(long volatile *Addend) {
   return _InterlockedIncrement_rel(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 release, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 release, align 4
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1287,8 +1282,8 @@ long test_InterlockedIncrement_rel(long volatile *Addend) {
 long test_InterlockedIncrement_nf(long volatile *Addend) {
   return _InterlockedIncrement_nf(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 monotonic, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 monotonic, align 4
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1296,8 +1291,8 @@ long test_InterlockedIncrement_nf(long volatile *Addend) {
 __int64 test_InterlockedIncrement64_acq(__int64 volatile *Addend) {
   return _InterlockedIncrement64_acq(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_acq(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 acquire, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 acquire, align 8
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1305,8 +1300,8 @@ __int64 test_InterlockedIncrement64_acq(__int64 volatile *Addend) {
 __int64 test_InterlockedIncrement64_rel(__int64 volatile *Addend) {
   return _InterlockedIncrement64_rel(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_rel(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 release, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 release, align 8
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1314,8 +1309,8 @@ __int64 test_InterlockedIncrement64_rel(__int64 volatile *Addend) {
 __int64 test_InterlockedIncrement64_nf(__int64 volatile *Addend) {
   return _InterlockedIncrement64_nf(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_nf(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 monotonic, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 monotonic, align 8
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1323,8 +1318,8 @@ __int64 test_InterlockedIncrement64_nf(__int64 volatile *Addend) {
 short test_InterlockedDecrement16_acq(short volatile *Addend) {
   return _InterlockedDecrement16_acq(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_acq(i16*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 acquire, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 acquire, align 2
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1332,8 +1327,8 @@ short test_InterlockedDecrement16_acq(short volatile *Addend) {
 short test_InterlockedDecrement16_rel(short volatile *Addend) {
   return _InterlockedDecrement16_rel(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_rel(i16*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 release, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 release, align 2
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1341,8 +1336,8 @@ short test_InterlockedDecrement16_rel(short volatile *Addend) {
 short test_InterlockedDecrement16_nf(short volatile *Addend) {
   return _InterlockedDecrement16_nf(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_nf(i16*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 monotonic, align 2
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 monotonic, align 2
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
 // CHECK-ARM-ARM64: ret i16 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1350,8 +1345,8 @@ short test_InterlockedDecrement16_nf(short volatile *Addend) {
 long test_InterlockedDecrement_acq(long volatile *Addend) {
   return _InterlockedDecrement_acq(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 acquire, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 acquire, align 4
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1359,8 +1354,8 @@ long test_InterlockedDecrement_acq(long volatile *Addend) {
 long test_InterlockedDecrement_rel(long volatile *Addend) {
   return _InterlockedDecrement_rel(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 release, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 release, align 4
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1368,8 +1363,8 @@ long test_InterlockedDecrement_rel(long volatile *Addend) {
 long test_InterlockedDecrement_nf(long volatile *Addend) {
   return _InterlockedDecrement_nf(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 monotonic, align 4
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 monotonic, align 4
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
 // CHECK-ARM-ARM64: ret i32 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1377,8 +1372,8 @@ long test_InterlockedDecrement_nf(long volatile *Addend) {
 __int64 test_InterlockedDecrement64_acq(__int64 volatile *Addend) {
   return _InterlockedDecrement64_acq(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_acq(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 acquire, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 acquire, align 8
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1386,8 +1381,8 @@ __int64 test_InterlockedDecrement64_acq(__int64 volatile *Addend) {
 __int64 test_InterlockedDecrement64_rel(__int64 volatile *Addend) {
   return _InterlockedDecrement64_rel(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_rel(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 release, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 release, align 8
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
 // CHECK-ARM-ARM64: }
@@ -1395,8 +1390,8 @@ __int64 test_InterlockedDecrement64_rel(__int64 volatile *Addend) {
 __int64 test_InterlockedDecrement64_nf(__int64 volatile *Addend) {
   return _InterlockedDecrement64_nf(Addend);
 }
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_nf(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 monotonic, align 8
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 monotonic, align 8
 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
 // CHECK-ARM-ARM64: ret i64 [[RESULT]]
 // CHECK-ARM-ARM64: }

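For reference, the _acq/_rel/_nf suffixes on these Interlocked intrinsics correspond to the acquire, release, and monotonic orderings that the cmpxchg and atomicrmw CHECK lines above match. A minimal portable sketch (hypothetical names, using the GCC/Clang __atomic builtins rather than the ARM-only MSVC intrinsics) lowers to the same opaque-pointer IR:

#include <stdint.h>

/* Sketch only: or8_acquire mirrors _InterlockedOr8_acq and lowers to
 * "atomicrmw or ptr %value, i8 %mask acquire, align 1". */
int8_t or8_acquire(volatile int8_t *value, int8_t mask) {
  return __atomic_fetch_or(value, mask, __ATOMIC_ACQUIRE);
}

/* cmpxchg32_release mirrors _InterlockedCompareExchange_rel and lowers
 * to "cmpxchg volatile ptr %dst, i32 %comparand, i32 %exchange
 * release monotonic, align 4"; the previous value is returned. */
int32_t cmpxchg32_release(volatile int32_t *dst, int32_t exchange,
                          int32_t comparand) {
  __atomic_compare_exchange_n(dst, &comparand, exchange, 0,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return comparand; /* updated to the observed value on failure */
}
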
diff --git a/clang/test/CodeGen/no-bitfield-type-align.c b/clang/test/CodeGen/no-bitfield-type-align.c
index b143f3973c5f9..53ed5e9ad8f85 100644
--- a/clang/test/CodeGen/no-bitfield-type-align.c
+++ b/clang/test/CodeGen/no-bitfield-type-align.c
@@ -1,6 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-apple-darwin -fno-bitfield-type-align -emit-llvm -o - %s | FileCheck %s
-
-// CHECK: %[[STRUCT_S:.*]] = type { i32 }
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -fno-bitfield-type-align -emit-llvm -o - %s | FileCheck %s
 
 struct S {
   unsigned short:   0;
@@ -9,21 +7,21 @@ struct S {
   unsigned short  f2:15;
 };
 
-// CHECK: define{{.*}} void @test_zero_width_bitfield(%[[STRUCT_S]]* noundef %[[A:.*]])
-// CHECK: %[[BF_LOAD:.*]] = load i32, i32* %[[V1:.*]], align 1
+// CHECK: define{{.*}} void @test_zero_width_bitfield(ptr noundef %[[A:.*]])
+// CHECK: %[[BF_LOAD:.*]] = load i32, ptr %[[V1:.*]], align 1
 // CHECK: %[[BF_CLEAR:.*]] = and i32 %[[BF_LOAD]], 32767
 // CHECK: %[[BF_CAST:.*]] = trunc i32 %[[BF_CLEAR]] to i16
 // CHECK: %[[CONV:.*]] = zext i16 %[[BF_CAST]] to i32
 // CHECK: %[[ADD:.*]] = add nsw i32 %[[CONV]], 1
 // CHECK: %[[CONV1:.*]] = trunc i32 %[[ADD]] to i16
 // CHECK: %[[V2:.*]] = zext i16 %[[CONV1]] to i32
-// CHECK: %[[BF_LOAD2:.*]] = load i32, i32* %[[V1]], align 1
+// CHECK: %[[BF_LOAD2:.*]] = load i32, ptr %[[V1]], align 1
 // CHECK: %[[BF_VALUE:.*]] = and i32 %[[V2]], 32767
 // CHECK: %[[BF_CLEAR3:.*]] = and i32 %[[BF_LOAD2]], -32768
 // CHECK: %[[BF_SET:.*]] = or i32 %[[BF_CLEAR3]], %[[BF_VALUE]]
-// CHECK: store i32 %[[BF_SET]], i32* %[[V1]], align 1
+// CHECK: store i32 %[[BF_SET]], ptr %[[V1]], align 1
 
-// CHECK: %[[BF_LOAD4:.*]] = load i32, i32* %[[V4:.*]], align 1
+// CHECK: %[[BF_LOAD4:.*]] = load i32, ptr %[[V4:.*]], align 1
 // CHECK: %[[BF_LSHR:.*]] = lshr i32 %[[BF_LOAD4]], 15
 // CHECK: %[[BF_CLEAR5:.*]] = and i32 %[[BF_LSHR]], 32767
 // CHECK: %[[BF_CAST6:.*]] = trunc i32 %[[BF_CLEAR5]] to i16
@@ -31,12 +29,12 @@ struct S {
 // CHECK: %[[ADD8:.*]] = add nsw i32 %[[CONV7]], 2
 // CHECK: %[[CONV9:.*]] = trunc i32 %[[ADD8]] to i16
 // CHECK: %[[V5:.*]] = zext i16 %[[CONV9]] to i32
-// CHECK: %[[BF_LOAD10:.*]] = load i32, i32* %[[V4]], align 1
+// CHECK: %[[BF_LOAD10:.*]] = load i32, ptr %[[V4]], align 1
 // CHECK: %[[BF_VALUE11:.*]] = and i32 %[[V5]], 32767
 // CHECK: %[[BF_SHL:.*]] = shl i32 %[[BF_VALUE11]], 15
 // CHECK: %[[BF_CLEAR12:.*]] = and i32 %[[BF_LOAD10]], -1073709057
 // CHECK: %[[BF_SET13:.*]] = or i32 %[[BF_CLEAR12]], %[[BF_SHL]]
-// CHECK: store i32 %[[BF_SET13]], i32* %[[V4]], align 1
+// CHECK: store i32 %[[BF_SET13]], ptr %[[V4]], align 1
 
 void test_zero_width_bitfield(struct S *a) {
   a->f1 += 1;

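With -fno-bitfield-type-align, f1 (bits 0-14) and f2 (bits 15-29) share a single i32 storage unit at byte alignment, which is what the load/and/shl/or/store sequence above encodes. A hypothetical sketch of the same masking arithmetic, written out as plain C:

#include <stdint.h>

/* Sketch of the read-modify-write for assigning f2: the constants match
 * the CHECK lines (32767 keeps 15 bits; -1073709057 == 0xC0007FFF
 * clears bits 15-29 before the OR). */
uint32_t set_f2(uint32_t unit, uint16_t new_f2) {
  uint32_t value   = (uint32_t)new_f2 & 32767u; /* and i32 %v, 32767        */
  uint32_t shifted = value << 15;               /* shl i32 %bf.value, 15    */
  uint32_t cleared = unit & 0xC0007FFFu;        /* and i32 ..., -1073709057 */
  return cleared | shifted;                     /* or i32 %bf.clear, %bf.shl */
}
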
diff --git a/clang/test/CodeGen/no-builtin.cpp b/clang/test/CodeGen/no-builtin.cpp
index cca13aa486d8a..14bae1fe1a223 100644
--- a/clang/test/CodeGen/no-builtin.cpp
+++ b/clang/test/CodeGen/no-builtin.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-linux-gnu -S -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -S -emit-llvm -o - %s | FileCheck %s
 
 // CHECK-LABEL: define{{.*}} void @foo_no_mempcy() #0
 extern "C" void foo_no_mempcy() __attribute__((no_builtin("memcpy"))) {}
@@ -28,15 +28,15 @@ struct B : public A {
   virtual ~B();
 };
 
-// CHECK-LABEL: define{{.*}} void @call_a_foo(%struct.A* noundef %a) #3
+// CHECK-LABEL: define{{.*}} void @call_a_foo(ptr noundef %a) #3
 extern "C" void call_a_foo(A *a) {
-  // CHECK: %call = call noundef i32 %2(%struct.A* {{[^,]*}} %0)
+  // CHECK: %call = call noundef i32 %1(ptr {{[^,]*}} %0)
   a->foo(); // virtual call is not annotated
 }
 
-// CHECK-LABEL: define{{.*}} void @call_b_foo(%struct.B* noundef %b) #3
+// CHECK-LABEL: define{{.*}} void @call_b_foo(ptr noundef %b) #3
 extern "C" void call_b_foo(B *b) {
-  // CHECK: %call = call noundef i32 %2(%struct.B* {{[^,]*}} %0)
+  // CHECK: %call = call noundef i32 %1(ptr {{[^,]*}} %0)
   b->foo(); // virtual call is not annotated
 }
 
@@ -49,8 +49,8 @@ extern "C" void call_foo_no_mempcy() {
 A::~A() {} // Anchoring A so A::foo() gets generated
 B::~B() {} // Anchoring B so B::foo() gets generated
 
-// CHECK-LABEL: define linkonce_odr noundef i32 @_ZNK1A3fooEv(%struct.A* noundef{{[^,]*}} %this) unnamed_addr #0 comdat align 2
-// CHECK-LABEL: define linkonce_odr noundef i32 @_ZNK1B3fooEv(%struct.B* noundef{{[^,]*}} %this) unnamed_addr #6 comdat align 2
+// CHECK-LABEL: define linkonce_odr noundef i32 @_ZNK1A3fooEv(ptr noundef{{[^,]*}} %this) unnamed_addr #0 comdat align 2
+// CHECK-LABEL: define linkonce_odr noundef i32 @_ZNK1B3fooEv(ptr noundef{{[^,]*}} %this) unnamed_addr #6 comdat align 2
 
 // CHECK:     attributes #0 = {{{.*}}"no-builtin-memcpy"{{.*}}}
 // CHECK-NOT: attributes #0 = {{{.*}}"no-builtin-memmove"{{.*}}}

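The attribute checks above pin down that no_builtin("memcpy") becomes a "no-builtin-memcpy" function attribute (attributes #0), while virtual calls stay unannotated. A hypothetical sketch of what the attribute buys in practice:

struct Big { char bytes[256]; };

/* Sketch only: the "no-builtin-memcpy" attribute keeps the optimizer
 * from recognizing this loop as a memcpy idiom and emitting a libcall,
 * which matters e.g. inside a memcpy implementation itself. */
__attribute__((no_builtin("memcpy")))
void copy_no_memcpy(struct Big *dst, const struct Big *src) {
  for (unsigned i = 0; i < sizeof dst->bytes; ++i)
    dst->bytes[i] = src->bytes[i];
}
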
diff --git a/clang/test/CodeGen/packed-nest-unpacked.c b/clang/test/CodeGen/packed-nest-unpacked.c
index 50479efbca106..318345bed90aa 100644
--- a/clang/test/CodeGen/packed-nest-unpacked.c
+++ b/clang/test/CodeGen/packed-nest-unpacked.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers %s -triple x86_64-apple-macosx10.7.2 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple x86_64-apple-macosx10.7.2 -emit-llvm -o - | FileCheck %s
 
 struct X { int x[6]; };
 struct Y { char x[13]; struct X y; } __attribute((packed));
@@ -9,40 +9,40 @@ struct X foo(void);
 // <rdar://problem/10463337>
 struct X test1(void) {
   // CHECK: @test1
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* align 1 bitcast (%struct.X* getelementptr inbounds (%struct.Y, %struct.Y* @g, i32 0, i32 1) to i8*), i64 24, i1 false)
+  // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr {{.*}}, ptr align 1 getelementptr inbounds (%struct.Y, ptr @g, i32 0, i32 1), i64 24, i1 false)
   return g.y;
 }
 struct X test2(void) {
   // CHECK: @test2
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* align 1 bitcast (%struct.X* getelementptr inbounds (%struct.Y, %struct.Y* @g, i32 0, i32 1) to i8*), i64 24, i1 false)
+  // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr {{.*}}, ptr align 1 getelementptr inbounds (%struct.Y, ptr @g, i32 0, i32 1), i64 24, i1 false)
   struct X a = g.y;
   return a;
 }
 
 void test3(struct X a) {
   // CHECK: @test3
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 bitcast (%struct.X* getelementptr inbounds (%struct.Y, %struct.Y* @g, i32 0, i32 1) to i8*), i8* {{.*}}, i64 24, i1 false)
+  // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 1 getelementptr inbounds (%struct.Y, ptr @g, i32 0, i32 1), ptr {{.*}}, i64 24, i1 false)
   g.y = a;
 }
 
 // <rdar://problem/10530444>
 void test4(void) {
   // CHECK: @test4
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* align 1 bitcast (%struct.X* getelementptr inbounds (%struct.Y, %struct.Y* @g, i32 0, i32 1) to i8*), i64 24, i1 false)
+  // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr {{.*}}, ptr align 1 getelementptr inbounds (%struct.Y, ptr @g, i32 0, i32 1), i64 24, i1 false)
   f(g.y);
 }
 
 // PR12395
 int test5(void) {
   // CHECK: @test5
-  // CHECK: load i32, i32* getelementptr inbounds (%struct.Y, %struct.Y* @g, i32 0, i32 1, i32 0, i64 0), align 1
+  // CHECK: load i32, ptr getelementptr inbounds (%struct.Y, ptr @g, i32 0, i32 1), align 1
   return g.y.x[0];
 }
 
 // <rdar://problem/11220251>
 void test6(void) {
   // CHECK: @test6
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 bitcast (%struct.X* getelementptr inbounds (%struct.Y, %struct.Y* @g, i32 0, i32 1) to i8*), i8* align 4 %{{.*}}, i64 24, i1 false)
+  // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 1 getelementptr inbounds (%struct.Y, ptr @g, i32 0, i32 1), ptr align 4 %{{.*}}, i64 24, i1 false)
   g.y = foo();
 }
 
@@ -60,14 +60,14 @@ struct YBitfield gbitfield;
 
 unsigned test7(void) {
   // CHECK: @test7
-  // CHECK: load i32, i32* getelementptr inbounds (%struct.YBitfield, %struct.YBitfield* @gbitfield, i32 0, i32 1, i32 0), align 1
+  // CHECK: load i32, ptr getelementptr inbounds (%struct.YBitfield, ptr @gbitfield, i32 0, i32 1), align 1
   return gbitfield.y.b2;
 }
 
 void test8(unsigned x) {
   // CHECK: @test8
-  // CHECK: load i32, i32* getelementptr inbounds (%struct.YBitfield, %struct.YBitfield* @gbitfield, i32 0, i32 1, i32 0), align 1
-  // CHECK: store i32 {{.*}}, i32* getelementptr inbounds (%struct.YBitfield, %struct.YBitfield* @gbitfield, i32 0, i32 1, i32 0), align 1
+  // CHECK: load i32, ptr getelementptr inbounds (%struct.YBitfield, ptr @gbitfield, i32 0, i32 1), align 1
+  // CHECK: store i32 {{.*}}, ptr getelementptr inbounds (%struct.YBitfield, ptr @gbitfield, i32 0, i32 1), align 1
   gbitfield.y.b2 = x;
 }
 
@@ -81,14 +81,14 @@ struct TBitfield tbitfield;
 
 unsigned test9(void) {
   // CHECK: @test9
-  // CHECK: load i16, i16* getelementptr inbounds (%struct.TBitfield, %struct.TBitfield* @tbitfield, i32 0, i32 2), align 1
+  // CHECK: load i16, ptr getelementptr inbounds (%struct.TBitfield, ptr @tbitfield, i32 0, i32 2), align 1
   return tbitfield.c;
 }
 
 void test10(unsigned x) {
   // CHECK: @test10
-  // CHECK: load i16, i16* getelementptr inbounds (%struct.TBitfield, %struct.TBitfield* @tbitfield, i32 0, i32 2), align 1
-  // CHECK: store i16 {{.*}}, i16* getelementptr inbounds (%struct.TBitfield, %struct.TBitfield* @tbitfield, i32 0, i32 2), align 1
+  // CHECK: load i16, ptr getelementptr inbounds (%struct.TBitfield, ptr @tbitfield, i32 0, i32 2), align 1
+  // CHECK: store i16 {{.*}}, ptr getelementptr inbounds (%struct.TBitfield, ptr @tbitfield, i32 0, i32 2), align 1
   tbitfield.c = x;
 }
 

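Every CHECK line above carries align 1 because the nested struct X sits at byte offset 13 inside the packed struct Y. A hypothetical sketch of the pattern the test exercises:

#include <string.h>

struct X { int x[6]; };
struct Y { char x[13]; struct X y; } __attribute__((packed));

/* Sketch only: p->y is 1-byte aligned, so Clang copies it with an
 * align-1 llvm.memcpy rather than naturally aligned i32 loads. */
struct X copy_out(const struct Y *p) {
  struct X tmp;
  memcpy(&tmp, &p->y, sizeof tmp);
  return tmp;
}
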
diff --git a/clang/test/CodeGen/partial-reinitialization2.c b/clang/test/CodeGen/partial-reinitialization2.c
index 164a3ca6df7a5..e709c1d4ad1ee 100644
--- a/clang/test/CodeGen/partial-reinitialization2.c
+++ b/clang/test/CodeGen/partial-reinitialization2.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers %s -triple x86_64-unknown-unknown -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple x86_64-unknown-unknown -emit-llvm -o - | FileCheck %s
 
 struct P1 { char x[6]; } g1 = { "foo" };
 struct LP1 { struct P1 p1; };
@@ -15,8 +15,8 @@ union ULP3 { struct LP3 l3; };
 // CHECK-LABEL: test1
 void test1(void)
 {
-  // CHECK: call void @llvm.memcpy{{.*}}%struct.P1, %struct.P1* @g1{{.*}}i64 6, i1 false)
-  // CHECK: store i8 120, i8* %
+  // CHECK: call void @llvm.memcpy{{.*}}ptr align 1 @g1, i64 6, i1 false)
+  // CHECK: store i8 120, ptr %
 
   struct LP1 l = { .p1 = g1, .p1.x[2] = 'x' };
 }
@@ -24,8 +24,8 @@ void test1(void)
 // CHECK-LABEL: test2
 void test2(void)
 {
-  // CHECK: call void @llvm.memcpy{{.*}}%struct.P1, %struct.P1* @g1{{.*}}i64 6, i1 false)
-  // CHECK: store i8 114, i8* %
+  // CHECK: call void @llvm.memcpy{{.*}}ptr align 1 @g1, i64 6, i1 false)
+  // CHECK: store i8 114, ptr %
 
   struct LP1 l = { .p1 = g1, .p1.x[1] = 'r' };
 }
@@ -33,8 +33,8 @@ void test2(void)
 // CHECK-LABEL: test3
 void test3(void)
 {
-  // CHECK: call void @llvm.memcpy{{.*}}%struct.P2* @g2{{.*}}i64 12, i1 false)
-  // CHECK: store i32 10, i32* %
+  // CHECK: call void @llvm.memcpy{{.*}}ptr align 4 @g2, i64 12, i1 false)
+  // CHECK: store i32 10, ptr %
 
   struct LP2 l = { .p2 = g2, .p2.b = 10 };
 }
@@ -64,10 +64,9 @@ union UP2 get123(void)
 void test4(void)
 {
   // CHECK: [[CALL:%[a-z0-9]+]] = call {{.*}}@get123()
-  // CHECK: store{{.*}}[[CALL]], {{.*}}[[TMP0:%[a-z0-9]+]]
-  // CHECK: [[TMP1:%[a-z0-9]+]] = bitcast {{.*}}[[TMP0]]
-  // CHECK: call void @llvm.memcpy{{.*}}[[TMP1]], i64 12, i1 false)
-  // CHECK: store i32 100, i32* %
+  // CHECK: store{{.*}}[[CALL]], {{.*}}[[TMP0:%[a-z0-9.]+]]
+  // CHECK: call void @llvm.memcpy{{.*}}[[TMP0]], i64 12, i1 false)
+  // CHECK: store i32 100, ptr %
 
   struct LUP2 { union UP2 up; } var = { get123(), .up.p2.a = 100 };
 }
@@ -76,15 +75,15 @@ void test4(void)
 void test5(void)
 {
   // .l3 = g3
-  // CHECK: call void @llvm.memcpy{{.*}}%struct.LP3, %struct.LP3* @g3{{.*}}i64 12, i1 false)
+  // CHECK: call void @llvm.memcpy{{.*}}ptr align 1 @g3, i64 12, i1 false)
 
   // .l3.p1 = { [0] = g1 } implicitly sets [1] to zero
-  // CHECK: call void @llvm.memcpy{{.*}}%struct.P1, %struct.P1* @g1{{.*}}i64 6, i1 false)
-  // CHECK: getelementptr{{.*}}%struct.P1, %struct.P1*{{.*}}i64 1
+  // CHECK: call void @llvm.memcpy{{.*}}ptr align 1 @g1, i64 6, i1 false)
+  // CHECK: getelementptr{{.*}}%struct.P1, ptr{{.*}}i64 1
   // CHECK: call void @llvm.memset{{.*}}i8 0, i64 6, i1 false)
 
   // .l3.p1[1].x[1] = 'x'
-  // CHECK: store i8 120, i8* %
+  // CHECK: store i8 120, ptr %
 
   struct LLP3 var = { .l3 = g3, .l3.p1 = { [0] = g1 }, .l3.p1[1].x[1] = 'x' };
 }
@@ -92,15 +91,14 @@ void test5(void)
 // CHECK-LABEL: test6
 void test6(void)
 {
-  // CHECK: [[LP:%[a-z0-9]+]] = getelementptr{{.*}}%struct.LLP2P2, %struct.LLP2P2*{{.*}}, i32 0, i32 0
-  // CHECK: call {{.*}}get456789(%struct.LP2P2* {{.*}}[[LP]])
+  // CHECK: [[LP:%[a-z0-9]+]] = getelementptr{{.*}}%struct.LLP2P2, ptr{{.*}}, i32 0, i32 0
+  // CHECK: call {{.*}}get456789(ptr {{.*}}[[LP]])
 
   // CHECK: [[CALL:%[a-z0-9]+]] = call {{.*}}@get235()
-  // CHECK: store{{.*}}[[CALL]], {{.*}}[[TMP0:%[a-z0-9]+]]
-  // CHECK: [[TMP1:%[a-z0-9]+]] = bitcast {{.*}}[[TMP0]]
-  // CHECK: call void @llvm.memcpy{{.*}}[[TMP1]], i64 12, i1 false)
+  // CHECK: store{{.*}}[[CALL]], {{.*}}[[TMP0:%[a-z0-9.]+]]
+  // CHECK: call void @llvm.memcpy{{.*}}[[TMP0]], i64 12, i1 false)
 
-  // CHECK: store i32 10, i32* %
+  // CHECK: store i32 10, ptr %
 
   struct LLP2P2 { struct LP2P2 lp; } var =  { get456789(),
                                               .lp.p1 = get235(),

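The pattern these tests pin down is that a designated initializer which partially overrides a copied aggregate first memcpys the whole base value and then stores the overridden members. A hypothetical minimal reproduction of test1:

struct P1 { char x[6]; };
struct P1 g1 = { "foo" };

/* Sketch only: emits a 6-byte llvm.memcpy from @g1, then
 * "store i8 120" ('x') into l.p1.x[2]. */
void partial_override(void) {
  struct LP1 { struct P1 p1; } l = { .p1 = g1, .p1.x[2] = 'x' };
  (void)l;
}
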
diff --git a/clang/test/CodeGen/semantic-interposition.c b/clang/test/CodeGen/semantic-interposition.c
index 7ad650869a08d..243bd41d9dae8 100644
--- a/clang/test/CodeGen/semantic-interposition.c
+++ b/clang/test/CodeGen/semantic-interposition.c
@@ -1,22 +1,22 @@
 /// -fno-semantic-interposition is the default and local aliases (via dso_local) are allowed.
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-linux-gnu -emit-llvm -mrelocation-model pic -pic-level 1 %s -o - | FileCheck %s --check-prefixes=CHECK,NOMETADATA
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -mrelocation-model pic -pic-level 1 %s -o - | FileCheck %s --check-prefixes=CHECK,NOMETADATA
 
 /// -fsemantic-interposition sets a module metadata.
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-linux-gnu -emit-llvm -mrelocation-model pic -pic-level 1 -fsemantic-interposition %s -o - | FileCheck %s --check-prefixes=PREEMPT,METADATA
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -mrelocation-model pic -pic-level 1 -fsemantic-interposition %s -o - | FileCheck %s --check-prefixes=PREEMPT,METADATA
 
 /// Traditional half-baked behavior: interprocedural optimizations are allowed
 /// but local aliases are not used.
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-linux-gnu -emit-llvm -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition %s -o - | FileCheck %s --check-prefixes=PREEMPT,NOMETADATA
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm -mrelocation-model pic -pic-level 1 -fhalf-no-semantic-interposition %s -o - | FileCheck %s --check-prefixes=PREEMPT,NOMETADATA
 
 // CHECK: @var = global i32 0, align 4
 // CHECK: @ext_var = external global i32, align 4
-// CHECK: @ifunc = ifunc i32 (), bitcast (i8* ()* @ifunc_resolver to i32 ()* ()*)
+// CHECK: @ifunc = ifunc i32 (), ptr @ifunc_resolver
 // CHECK: define dso_local i32 @func()
 // CHECK: declare i32 @ext()
 
 // PREEMPT: @var = global i32 0, align 4
 // PREEMPT: @ext_var = external global i32, align 4
-// PREEMPT: @ifunc = ifunc i32 (), bitcast (i8* ()* @ifunc_resolver to i32 ()* ()*)
+// PREEMPT: @ifunc = ifunc i32 (), ptr @ifunc_resolver
 // PREEMPT: define i32 @func()
 // PREEMPT: declare i32 @ext()
 

diff --git a/clang/test/CodeGen/sparc-vaarg.c b/clang/test/CodeGen/sparc-vaarg.c
index 537072daf62cc..b78949e09350c 100644
--- a/clang/test/CodeGen/sparc-vaarg.c
+++ b/clang/test/CodeGen/sparc-vaarg.c
@@ -1,10 +1,10 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple sparc -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple sparc -emit-llvm -o - %s | FileCheck %s
 #include <stdarg.h>
 
 // CHECK-LABEL: define{{.*}} i32 @get_int
 // CHECK: [[RESULT:%[a-z_0-9]+]] = va_arg {{.*}}, i32{{$}}
-// CHECK: store i32 [[RESULT]], i32* [[LOC:%[a-z_0-9]+]]
-// CHECK: [[RESULT2:%[a-z_0-9]+]] = load i32, i32* [[LOC]]
+// CHECK: store i32 [[RESULT]], ptr [[LOC:%[a-z_0-9]+]]
+// CHECK: [[RESULT2:%[a-z_0-9]+]] = load i32, ptr [[LOC]]
 // CHECK: ret i32 [[RESULT2]]
 int get_int(va_list *args) {
   return va_arg(*args, int);
@@ -17,9 +17,8 @@ struct Foo {
 struct Foo dest;
 
 // CHECK-LABEL: define{{.*}} void @get_struct
-// CHECK: [[RESULT:%[a-z_0-9]+]] = va_arg {{.*}}, %struct.Foo*{{$}}
-// CHECK: [[RESULT2:%[a-z_0-9]+]] = bitcast {{.*}} [[RESULT]] to i8*
-// CHECK: call void @llvm.memcpy{{.*}}@dest{{.*}}, i8* align {{[0-9]+}} [[RESULT2]]
+// CHECK: [[RESULT:%[a-z_0-9]+]] = va_arg {{.*}}, ptr{{$}}
+// CHECK: call void @llvm.memcpy{{.*}}@dest{{.*}}, ptr align {{[0-9]+}} [[RESULT]]
 void get_struct(va_list *args) {
  dest = va_arg(*args, struct Foo);
 }
@@ -29,7 +28,7 @@ enum E { Foo_one = 1 };
 enum E enum_dest;
 
 // CHECK-LABEL: define{{.*}} void @get_enum
-// CHECK: va_arg i8** {{.*}}, i32
+// CHECK: va_arg ptr {{.*}}, i32
 void get_enum(va_list *args) {
   enum_dest = va_arg(*args, enum E);
 }

diff --git a/clang/test/CodeGen/sparcv9-dwarf.c b/clang/test/CodeGen/sparcv9-dwarf.c
index 654ce6465517b..b9d64a5adab44 100644
--- a/clang/test/CodeGen/sparcv9-dwarf.c
+++ b/clang/test/CodeGen/sparcv9-dwarf.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple sparcv9-unknown-unknown -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple sparcv9-unknown-unknown -emit-llvm %s -o - | FileCheck %s
 static unsigned char dwarf_reg_size_table[102+1];
 
 int test(void) {
@@ -8,92 +8,92 @@ int test(void) {
 }
 
 // CHECK-LABEL: define{{.*}} signext i32 @test()
-// CHECK:       store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 0)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 1)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 2)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 3)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 4)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 5)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 6)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 7)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 8)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 9)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 10)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 11)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 12)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 13)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 14)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 15)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 16)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 17)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 18)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 19)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 20)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 21)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 22)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 23)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 24)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 25)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 26)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 27)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 28)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 29)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 30)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 31)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 32)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 33)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 34)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 35)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 36)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 37)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 38)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 39)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 40)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 41)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 42)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 43)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 44)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 45)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 46)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 47)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 48)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 49)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 50)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 51)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 52)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 53)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 54)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 55)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 56)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 57)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 58)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 59)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 60)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 61)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 62)
-// CHECK-NEXT:  store i8 4, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 63)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 64)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 65)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 66)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 67)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 68)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 69)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 70)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 71)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 72)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 73)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 74)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 75)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 76)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 77)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 78)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 79)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 80)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 81)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 82)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 83)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 84)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 85)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 86)
-// CHECK-NEXT:  store i8 8, i8* getelementptr inbounds ([103 x i8], [103 x i8]* @dwarf_reg_size_table, i64 0, i64 87)
+// CHECK:       store i8 8, ptr @dwarf_reg_size_table
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 1), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 2), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 3), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 4), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 5), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 6), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 7), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 8), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 9), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 10), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 11), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 12), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 13), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 14), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 15), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 16), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 17), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 18), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 19), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 20), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 21), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 22), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 23), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 24), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 25), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 26), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 27), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 28), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 29), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 30), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 31), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 32), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 33), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 34), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 35), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 36), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 37), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 38), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 39), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 40), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 41), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 42), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 43), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 44), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 45), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 46), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 47), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 48), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 49), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 50), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 51), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 52), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 53), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 54), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 55), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 56), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 57), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 58), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 59), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 60), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 61), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 62), align 1
+// CHECK-NEXT: store i8 4, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 63), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 64), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 65), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 66), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 67), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 68), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 69), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 70), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 71), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 72), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 73), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 74), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 75), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 76), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 77), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 78), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 79), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 80), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 81), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 82), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 83), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 84), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 85), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 86), align 1
+// CHECK-NEXT: store i8 8, ptr getelementptr inbounds (i8, ptr @dwarf_reg_size_table, i32 87), align 1
 // CHECK-NEXT:  ret i32 14

diff --git a/clang/test/CodeGen/staticinit.c b/clang/test/CodeGen/staticinit.c
index 1a876b99f2b84..90b8fa5edb024 100644
--- a/clang/test/CodeGen/staticinit.c
+++ b/clang/test/CodeGen/staticinit.c
@@ -1,5 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple i386-pc-linux-gnu -emit-llvm -o %t %s
-// RUN: grep "g.b = internal global i8. getelementptr" %t
+// RUN: %clang_cc1 -triple i386-pc-linux-gnu -emit-llvm -o - %s | FileCheck %s
 
 struct AStruct { 
   int i;
@@ -15,6 +14,7 @@ void f(void) {
   static struct AStruct myStruct = { 1, "two", 3.0 };
 }
 
+// CHECK: @g.b = internal global ptr @g.a
 void g(void) {
   static char a[10];
   static char *b = a;
@@ -26,7 +26,7 @@ void foo(void) {
   static struct s var = {((void*)&((char*)0)[0])};
 }
 
-// RUN: grep "f1.l0 = internal global i32 ptrtoint (i32 ()\* @f1 to i32)" %t
+// CHECK: @f1.l0 = internal global i32 ptrtoint (ptr @f1 to i32)
 int f1(void) { static int l0 = (unsigned) f1; }
 
 // PR7044

diff --git a/clang/test/CodeGen/temporary-lifetime-exceptions.cpp b/clang/test/CodeGen/temporary-lifetime-exceptions.cpp
index 4cea0e980750a..50e4a0f56cb2d 100644
--- a/clang/test/CodeGen/temporary-lifetime-exceptions.cpp
+++ b/clang/test/CodeGen/temporary-lifetime-exceptions.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers %s -fexceptions -fcxx-exceptions -std=c++11 -O1 -triple x86_64 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -fexceptions -fcxx-exceptions -std=c++11 -O1 -triple x86_64 -emit-llvm -o - | FileCheck %s
 
 // lifetime.end should be invoked even if the destructor doesn't run due to an
 // exception thrown from previous ctor call.
@@ -8,17 +8,15 @@ A Baz(const A&);
 
 void Test1() {
   // CHECK-LABEL: @_Z5Test1v(
-  // CHECK: getelementptr
-  // CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull [[TMP:[^ ]+]])
-  // CHECK-NEXT: getelementptr
-  // CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull [[TMP1:[^ ]+]])
+  // CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TMP:[^ ]+]])
+  // CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TMP1:[^ ]+]])
 
   // Normal exit
-  // CHECK: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull [[TMP1]])
-  // CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull [[TMP]])
+  // CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP1]])
+  // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP]])
 
   // Exception exit
-  // CHECK: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull [[TMP1]])
-  // CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull [[TMP]])
+  // CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP1]])
+  // CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TMP]])
   Baz(Baz(A()));
 }

diff --git a/clang/test/CodeGen/union-tbaa1.c b/clang/test/CodeGen/union-tbaa1.c
index 8be8fcea21a99..f6f10b3e5b1e1 100644
--- a/clang/test/CodeGen/union-tbaa1.c
+++ b/clang/test/CodeGen/union-tbaa1.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers %s -triple hexagon-unknown-elf -O2 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -triple hexagon-unknown-elf -O2 -emit-llvm -o - | FileCheck %s
 
 typedef union __attribute__((aligned(4))) {
   unsigned short uh[2];
@@ -7,38 +7,43 @@ typedef union __attribute__((aligned(4))) {
 
 void bar(vect32 p[][2]);
 
-// CHECK-LABEL: define{{.*}} void @fred
+// CHECK-LABEL: define dso_local void @fred
+// CHECK-SAME: (i32 noundef [[NUM:%.*]], ptr nocapture noundef writeonly [[VEC:%.*]], ptr nocapture noundef readonly [[INDEX:%.*]], ptr nocapture noundef readonly [[ARR:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca [4 x [2 x %union.vect32]], align 8
+// CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[TMP]]) #[[ATTR3:[0-9]+]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[INDEX]], align 4, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARR]], i32 [[TMP0]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[TMP1]], [[NUM]]
+// CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP0]]
+// CHECK-NEXT:    store i32 [[MUL]], ptr [[ARRAYIDX2]], align 8, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARR]], i32 [[TMP0]], i32 1
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[MUL6:%.*]] = mul i32 [[TMP2]], [[NUM]]
+// CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP0]], i32 1
+// CHECK-NEXT:    store i32 [[MUL6]], ptr [[ARRAYIDX8]], align 4, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP3:%.*]] = lshr i32 [[MUL]], 16
+// CHECK-NEXT:    store i32 [[TMP3]], ptr [[VEC]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[INDEX]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP4]], i32 1
+// CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds [2 x i16], ptr [[ARRAYIDX14]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX15]], align 2, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[CONV16:%.*]] = zext i16 [[TMP5]] to i32
+// CHECK-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, ptr [[VEC]], i32 1
+// CHECK-NEXT:    store i32 [[CONV16]], ptr [[ARRAYIDX17]], align 4, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @bar(ptr noundef nonnull [[TMP]]) #[[ATTR3]]
+// CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[TMP]]) #[[ATTR3]]
+// CHECK-NEXT:    ret void
+//
 void fred(unsigned Num, int Vec[2], int *Index, int Arr[4][2]) {
   vect32 Tmp[4][2];
-// Generate tbaa for the load of Index:
-// CHECK: load i32, i32* %Index{{.*}}tbaa
-// But no tbaa for the two stores:
-// CHECK: %uw[[UW1:[0-9]*]] = getelementptr
-// CHECK: store{{.*}}%uw[[UW1]]
-// CHECK: tbaa ![[OCPATH:[0-9]+]]
-// There will be a load after the store, and it will use tbaa. Make sure
-// the check-not above doesn't find it:
-// CHECK: load
   Tmp[*Index][0].uw = Arr[*Index][0] * Num;
-// CHECK: %uw[[UW2:[0-9]*]] = getelementptr
-// CHECK: store{{.*}}%uw[[UW2]]
-// CHECK: tbaa ![[OCPATH]]
   Tmp[*Index][1].uw = Arr[*Index][1] * Num;
-// Same here, don't generate tbaa for the loads:
-// CHECK: %uh[[UH1:[0-9]*]] = bitcast %union.vect32
-// CHECK: %arrayidx[[AX1:[0-9]*]] = getelementptr{{.*}}%uh[[UH1]]
-// CHECK: load i16, i16* %arrayidx[[AX1]]
-// CHECK: tbaa ![[OCPATH]]
-// CHECK: store
   Vec[0] = Tmp[*Index][0].uh[1];
-// CHECK: %uh[[UH2:[0-9]*]] = bitcast %union.vect32
-// CHECK: %arrayidx[[AX2:[0-9]*]] = getelementptr{{.*}}%uh[[UH2]]
-// CHECK: load i16, i16* %arrayidx[[AX2]]
-// CHECK: tbaa ![[OCPATH]]
-// CHECK: store
   Vec[1] = Tmp[*Index][1].uh[1];
   bar(Tmp);
 }
 
-// CHECK-DAG: ![[CHAR:[0-9]+]] = !{!"omnipotent char"
-// CHECK-DAG: ![[OCPATH]] = !{![[CHAR]], ![[CHAR]], i64 0}
+// CHECK-DAG: [[CHAR:![0-9]+]] = !{!"omnipotent char"
+// CHECK-DAG: [[TBAA6]] = !{[[CHAR]], [[CHAR]], i64 0}

diff --git a/clang/test/CodeGen/volatile.c b/clang/test/CodeGen/volatile.c
index 4fa114d375207..14e8f1f498e2c 100644
--- a/clang/test/CodeGen/volatile.c
+++ b/clang/test/CodeGen/volatile.c
@@ -1,6 +1,6 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple=aarch64-unknown-linux-gnu -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefixes CHECK-IT,CHECK-IT-ARM
-// RUN: %clang_cc1 -no-opaque-pointers -triple=x86_64-unknown-linux-gnu -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefixes CHECK-IT,CHECK-IT-OTHER
-// RUN: %clang_cc1 -no-opaque-pointers -triple=%ms_abi_triple -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-MS
+// RUN: %clang_cc1 -triple=aarch64-unknown-linux-gnu -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefixes CHECK-IT,CHECK-IT-ARM
+// RUN: %clang_cc1 -triple=x86_64-unknown-linux-gnu -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefixes CHECK-IT,CHECK-IT-OTHER
+// RUN: %clang_cc1 -triple=%ms_abi_triple -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-MS
 
 int S;
 volatile int vS;
@@ -42,177 +42,177 @@ int main(void) {
 // CHECK: [[I:%[a-zA-Z0-9_.]+]] = alloca i32
   // load
   i=S;
-// CHECK: load i32, i32* @S
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load i32, ptr @S
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=vS;
-// CHECK: load volatile i32, i32* @vS
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load volatile i32, ptr @vS
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=*pS;
-// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pS
-// CHECK: load i32, i32* [[PS_VAL]]
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load ptr, ptr @pS
+// CHECK: load i32, ptr [[PS_VAL]]
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=*pvS;
-// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pvS
-// CHECK: load volatile i32, i32* [[PVS_VAL]]
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load ptr, ptr @pvS
+// CHECK: load volatile i32, ptr [[PVS_VAL]]
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=A[2];
-// CHECK: load i32, i32* getelementptr {{.*}} @A
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load i32, ptr getelementptr {{.*}} @A
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=vA[2];
-// CHECK: load volatile i32, i32* getelementptr {{.*}} @vA
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load volatile i32, ptr getelementptr {{.*}} @vA
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=F.x;
-// CHECK: load i32, i32* getelementptr {{.*}} @F
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load i32, ptr @F
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=vF.x;
-// CHECK: load volatile i32, i32* getelementptr {{.*}} @vF
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load volatile i32, ptr @vF
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=F2.x;
-// CHECK: load i32, i32* getelementptr {{.*}} @F2
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load i32, ptr @F2
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=vF2.x;
-// CHECK: load volatile i32, i32* getelementptr {{.*}} @vF2
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load volatile i32, ptr @vF2
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=vpF2->x;
-// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load {{%[a-zA-Z0-9_.]+}}*, {{%[a-zA-Z0-9_.]+}}** @vpF2
+// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load ptr, ptr @vpF2
 // CHECK: [[ELT:%[a-zA-Z0-9_.]+]] = getelementptr {{.*}} [[VPF2_VAL]]
-// CHECK: load volatile i32, i32* [[ELT]]
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load volatile i32, ptr [[ELT]]
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=F3.x.y;
-// CHECK: load i32, i32* getelementptr {{.*}} @F3
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load i32, ptr @F3
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=vF3.x.y;
-// CHECK: load volatile i32, i32* getelementptr {{.*}} @vF3
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load volatile i32, ptr @vF3
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=BF.x;
-// CHECK-IT: load i8, i8* getelementptr {{.*}} @BF
-// CHECK-MS: load i32, i32* getelementptr {{.*}} @BF
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK-IT: load i8, ptr @BF
+// CHECK-MS: load i32, ptr @BF
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=vBF.x;
-// CHECK-IT-OTHER: load volatile i8, i8* getelementptr {{.*}} @vBF
-// CHECK-IT-ARM: load volatile i32, i32* bitcast {{.*}} @vBF
-// CHECK-MS: load volatile i32, i32* getelementptr {{.*}} @vBF
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK-IT-OTHER: load volatile i8, ptr @vBF
+// CHECK-IT-ARM: load volatile i32, ptr @vBF
+// CHECK-MS: load volatile i32, ptr @vBF
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=V[3];
-// CHECK: load <4 x i32>, <4 x i32>* @V
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load <4 x i32>, ptr @V
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=vV[3];
-// CHECK: load volatile <4 x i32>, <4 x i32>* @vV
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load volatile <4 x i32>, ptr @vV
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=VE.yx[1];
-// CHECK: load <4 x i32>, <4 x i32>* @VE
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load <4 x i32>, ptr @VE
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=vVE.zy[1];
-// CHECK: load volatile <4 x i32>, <4 x i32>* @vVE
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load volatile <4 x i32>, ptr @vVE
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i = aggFct().x; // Note: not volatile
   // N.b. Aggregate return is extremely target specific, all we can
   // really say here is that there probably shouldn't be a volatile
   // load.
 // CHECK-NOT: load volatile
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i=vtS;
-// CHECK: load volatile i32, i32* @vtS
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load volatile i32, ptr @vtS
+// CHECK: store i32 {{.*}}, ptr [[I]]
 
 
   // store
   S=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: store i32 {{.*}}, i32* @S
+// CHECK: load i32, ptr [[I]]
+// CHECK: store i32 {{.*}}, ptr @S
   vS=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: store volatile i32 {{.*}}, i32* @vS
+// CHECK: load i32, ptr [[I]]
+// CHECK: store volatile i32 {{.*}}, ptr @vS
   *pS=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pS
-// CHECK: store i32 {{.*}}, i32* [[PS_VAL]]
+// CHECK: load i32, ptr [[I]]
+// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load ptr, ptr @pS
+// CHECK: store i32 {{.*}}, ptr [[PS_VAL]]
   *pvS=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pvS
-// CHECK: store volatile i32 {{.*}}, i32* [[PVS_VAL]]
+// CHECK: load i32, ptr [[I]]
+// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load ptr, ptr @pvS
+// CHECK: store volatile i32 {{.*}}, ptr [[PVS_VAL]]
   A[2]=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @A
+// CHECK: load i32, ptr [[I]]
+// CHECK: store i32 {{.*}}, ptr getelementptr {{.*}} @A
   vA[2]=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vA
+// CHECK: load i32, ptr [[I]]
+// CHECK: store volatile i32 {{.*}}, ptr getelementptr {{.*}} @vA
   F.x=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @F
+// CHECK: load i32, ptr [[I]]
+// CHECK: store i32 {{.*}}, ptr @F
   vF.x=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF
+// CHECK: load i32, ptr [[I]]
+// CHECK: store volatile i32 {{.*}}, ptr @vF
   F2.x=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @F2
+// CHECK: load i32, ptr [[I]]
+// CHECK: store i32 {{.*}}, ptr @F2
   vF2.x=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF2
+// CHECK: load i32, ptr [[I]]
+// CHECK: store volatile i32 {{.*}}, ptr @vF2
   vpF2->x=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load {{%[a-zA-Z0-9._]+}}*, {{%[a-zA-Z0-9._]+}}** @vpF2
+// CHECK: load i32, ptr [[I]]
+// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load ptr, ptr @vpF2
 // CHECK: [[ELT:%[a-zA-Z0-9_.]+]] = getelementptr {{.*}} [[VPF2_VAL]]
-// CHECK: store volatile i32 {{.*}}, i32* [[ELT]]
+// CHECK: store volatile i32 {{.*}}, ptr [[ELT]]
   vF3.x.y=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF3
+// CHECK: load i32, ptr [[I]]
+// CHECK: store volatile i32 {{.*}}, ptr @vF3
   BF.x=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK-IT: load i8, i8* getelementptr {{.*}} @BF
-// CHECK-MS: load i32, i32* getelementptr {{.*}} @BF
-// CHECK-IT: store i8 {{.*}}, i8* getelementptr {{.*}} @BF
-// CHECK-MS: store i32 {{.*}}, i32* getelementptr {{.*}} @BF
+// CHECK: load i32, ptr [[I]]
+// CHECK-IT: load i8, ptr @BF
+// CHECK-MS: load i32, ptr @BF
+// CHECK-IT: store i8 {{.*}}, ptr @BF
+// CHECK-MS: store i32 {{.*}}, ptr @BF
   vBF.x=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK-IT-OTHER: load volatile i8, i8* getelementptr {{.*}} @vBF
-// CHECK-IT-ARM: load volatile i32, i32* bitcast {{.*}} @vBF
-// CHECK-MS: load volatile i32, i32* getelementptr {{.*}} @vBF
-// CHECK-IT-OTHER: store volatile i8 {{.*}}, i8* getelementptr {{.*}} @vBF
-// CHECK-IT-ARM: store volatile i32 {{.*}}, i32* bitcast {{.*}} @vBF
-// CHECK-MS: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vBF
+// CHECK: load i32, ptr [[I]]
+// CHECK-IT-OTHER: load volatile i8, ptr @vBF
+// CHECK-IT-ARM: load volatile i32, ptr @vBF
+// CHECK-MS: load volatile i32, ptr @vBF
+// CHECK-IT-OTHER: store volatile i8 {{.*}}, ptr @vBF
+// CHECK-IT-ARM: store volatile i32 {{.*}}, ptr @vBF
+// CHECK-MS: store volatile i32 {{.*}}, ptr @vBF
   V[3]=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: load <4 x i32>, <4 x i32>* @V
-// CHECK: store <4 x i32> {{.*}}, <4 x i32>* @V
+// CHECK: load i32, ptr [[I]]
+// CHECK: load <4 x i32>, ptr @V
+// CHECK: store <4 x i32> {{.*}}, ptr @V
   vV[3]=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: load volatile <4 x i32>, <4 x i32>* @vV
-// CHECK: store volatile <4 x i32> {{.*}}, <4 x i32>* @vV
+// CHECK: load i32, ptr [[I]]
+// CHECK: load volatile <4 x i32>, ptr @vV
+// CHECK: store volatile <4 x i32> {{.*}}, ptr @vV
   vtS=i;
-// CHECK: load i32, i32* [[I]]
-// CHECK: store volatile i32 {{.*}}, i32* @vtS
+// CHECK: load i32, ptr [[I]]
+// CHECK: store volatile i32 {{.*}}, ptr @vtS
 
   // other ops:
   ++S;
-// CHECK: load i32, i32* @S
-// CHECK: store i32 {{.*}}, i32* @S
+// CHECK: load i32, ptr @S
+// CHECK: store i32 {{.*}}, ptr @S
   ++vS;
-// CHECK: load volatile i32, i32* @vS
-// CHECK: store volatile i32 {{.*}}, i32* @vS
+// CHECK: load volatile i32, ptr @vS
+// CHECK: store volatile i32 {{.*}}, ptr @vS
   i+=S;
-// CHECK: load i32, i32* @S
-// CHECK: load i32, i32* [[I]]
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load i32, ptr @S
+// CHECK: load i32, ptr [[I]]
+// CHECK: store i32 {{.*}}, ptr [[I]]
   i+=vS;
-// CHECK: load volatile i32, i32* @vS
-// CHECK: load i32, i32* [[I]]
-// CHECK: store i32 {{.*}}, i32* [[I]]
+// CHECK: load volatile i32, ptr @vS
+// CHECK: load i32, ptr [[I]]
+// CHECK: store i32 {{.*}}, ptr [[I]]
   ++vtS;
-// CHECK: load volatile i32, i32* @vtS
-// CHECK: store volatile i32 {{.*}}, i32* @vtS
+// CHECK: load volatile i32, ptr @vtS
+// CHECK: store volatile i32 {{.*}}, ptr @vtS
   (void)vF2;
   // From vF2 to a temporary
-// CHECK: call void @llvm.memcpy.{{.*}}(i8* align {{[0-9]+}} %{{.*}}, i8* {{.*}} @vF2 {{.*}}, i1 true)
+// CHECK: call void @llvm.memcpy.{{.*}}(ptr align {{[0-9]+}} %{{.*}}, ptr {{.*}} @vF2, {{.*}}, i1 true)
   vF2 = vF2;
   // vF2 to itself
-// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
+// CHECK: call void @llvm.memcpy.{{.*}}(ptr {{.*@vF2.*}}, ptr {{.*@vF2.*}}, i1 true)
   vF2 = vF2 = vF2;
   // vF2 to itself twice
-// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
-// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
+// CHECK: call void @llvm.memcpy.{{.*}}(ptr {{.*@vF2.*}}, ptr {{.*@vF2.*}}, i1 true)
+// CHECK: call void @llvm.memcpy.{{.*}}(ptr {{.*@vF2.*}}, ptr {{.*@vF2.*}}, i1 true)
   vF2 = (vF2, vF2);
   // vF2 to a temporary, then vF2 to itself
-// CHECK: call void @llvm.memcpy.{{.*}}(i8* align {{[0-9]+}} %{{.*}}, i8* {{.*@vF2.*}}, i1 true)
-// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
+// CHECK: call void @llvm.memcpy.{{.*}}(ptr align {{[0-9]+}} %{{.*}}, ptr {{.*@vF2.*}}, i1 true)
+// CHECK: call void @llvm.memcpy.{{.*}}(ptr {{.*@vF2.*}}, ptr {{.*@vF2.*}}, i1 true)
 }

diff --git a/clang/test/CodeGen/windows-swiftcall.c b/clang/test/CodeGen/windows-swiftcall.c
index 0aa93fae0581d..3e5c8a4d4b9d7 100644
--- a/clang/test/CodeGen/windows-swiftcall.c
+++ b/clang/test/CodeGen/windows-swiftcall.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-windows -emit-llvm -target-cpu core2 -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-unknown-windows -emit-llvm -target-cpu core2 -o - %s | FileCheck %s
 
 #define SWIFTCALL __attribute__((swiftcall))
 #define OUT __attribute__((swift_indirect_result))
@@ -12,29 +12,29 @@
 /*****************************************************************************/
 
 SWIFTCALL void indirect_result_1(OUT int *arg0, OUT float *arg1) {}
-// CHECK-LABEL: define {{.*}} void @indirect_result_1(i32* noalias noundef sret(i32*) align 4 dereferenceable(4){{.*}}, float* noalias noundef align 4 dereferenceable(4){{.*}})
+// CHECK-LABEL: define {{.*}} void @indirect_result_1(ptr noalias noundef sret(ptr) align 4 dereferenceable(4){{.*}}, ptr noalias noundef align 4 dereferenceable(4){{.*}})
 
 // TODO: maybe this shouldn't suppress sret.
 SWIFTCALL int indirect_result_2(OUT int *arg0, OUT float *arg1) {  __builtin_unreachable(); }
-// CHECK-LABEL: define {{.*}} i32 @indirect_result_2(i32* noalias noundef align 4 dereferenceable(4){{.*}}, float* noalias noundef align 4 dereferenceable(4){{.*}})
+// CHECK-LABEL: define {{.*}} i32 @indirect_result_2(ptr noalias noundef align 4 dereferenceable(4){{.*}}, ptr noalias noundef align 4 dereferenceable(4){{.*}})
 
 typedef struct { char array[1024]; } struct_reallybig;
 SWIFTCALL struct_reallybig indirect_result_3(OUT int *arg0, OUT float *arg1) { __builtin_unreachable(); }
-// CHECK-LABEL: define {{.*}} void @indirect_result_3({{.*}}* noalias sret({{.*}}) {{.*}}, i32* noalias noundef align 4 dereferenceable(4){{.*}}, float* noalias noundef align 4 dereferenceable(4){{.*}})
+// CHECK-LABEL: define {{.*}} void @indirect_result_3(ptr noalias sret({{.*}}) {{.*}}, ptr noalias noundef align 4 dereferenceable(4){{.*}}, ptr noalias noundef align 4 dereferenceable(4){{.*}})
 
 SWIFTCALL void context_1(CONTEXT void *self) {}
-// CHECK-LABEL: define {{.*}} void @context_1(i8* noundef swiftself
+// CHECK-LABEL: define {{.*}} void @context_1(ptr noundef swiftself
 
 SWIFTCALL void context_2(void *arg0, CONTEXT void *self) {}
-// CHECK-LABEL: define {{.*}} void @context_2(i8*{{.*}}, i8* noundef swiftself
+// CHECK-LABEL: define {{.*}} void @context_2(ptr{{.*}}, ptr noundef swiftself
 
 SWIFTCALL void context_error_1(CONTEXT int *self, ERROR float **error) {}
-// CHECK-LABEL: define {{.*}} void @context_error_1(i32* noundef swiftself{{.*}}, float** noundef swifterror %0)
-// CHECK:       [[TEMP:%.*]] = alloca float*, align 8
-// CHECK:       [[T0:%.*]] = load float*, float** [[ERRORARG:%.*]], align 8
-// CHECK:       store float* [[T0]], float** [[TEMP]], align 8
-// CHECK:       [[T0:%.*]] = load float*, float** [[TEMP]], align 8
-// CHECK:       store float* [[T0]], float** [[ERRORARG]], align 8
+// CHECK-LABEL: define {{.*}} void @context_error_1(ptr noundef swiftself{{.*}}, ptr noundef swifterror %0)
+// CHECK:       [[TEMP:%.*]] = alloca ptr, align 8
+// CHECK:       [[T0:%.*]] = load ptr, ptr [[ERRORARG:%.*]], align 8
+// CHECK:       store ptr [[T0]], ptr [[TEMP]], align 8
+// CHECK:       [[T0:%.*]] = load ptr, ptr [[TEMP]], align 8
+// CHECK:       store ptr [[T0]], ptr [[ERRORARG]], align 8
 void test_context_error_1(void) {
   int x;
   float *error;
@@ -42,16 +42,16 @@ void test_context_error_1(void) {
 }
 // CHECK-LABEL: define dso_local void @test_context_error_1()
 // CHECK:       [[X:%.*]] = alloca i32, align 4
-// CHECK:       [[ERROR:%.*]] = alloca float*, align 8
-// CHECK:       [[TEMP:%.*]] = alloca swifterror float*, align 8
-// CHECK:       [[T0:%.*]] = load float*, float** [[ERROR]], align 8
-// CHECK:       store float* [[T0]], float** [[TEMP]], align 8
-// CHECK:       call [[SWIFTCC:swiftcc]] void @context_error_1(i32* noundef swiftself [[X]], float** noundef swifterror [[TEMP]])
-// CHECK:       [[T0:%.*]] = load float*, float** [[TEMP]], align 8
-// CHECK:       store float* [[T0]], float** [[ERROR]], align 8
+// CHECK:       [[ERROR:%.*]] = alloca ptr, align 8
+// CHECK:       [[TEMP:%.*]] = alloca swifterror ptr, align 8
+// CHECK:       [[T0:%.*]] = load ptr, ptr [[ERROR]], align 8
+// CHECK:       store ptr [[T0]], ptr [[TEMP]], align 8
+// CHECK:       call [[SWIFTCC:swiftcc]] void @context_error_1(ptr noundef swiftself [[X]], ptr noundef swifterror [[TEMP]])
+// CHECK:       [[T0:%.*]] = load ptr, ptr [[TEMP]], align 8
+// CHECK:       store ptr [[T0]], ptr [[ERROR]], align 8
 
 SWIFTCALL void context_error_2(short s, CONTEXT int *self, ERROR float **error) {}
-// CHECK-LABEL: define {{.*}} void @context_error_2(i16{{.*}}, i32* noundef swiftself{{.*}}, float** noundef swifterror %0)
+// CHECK-LABEL: define {{.*}} void @context_error_2(i16{{.*}}, ptr noundef swiftself{{.*}}, ptr noundef swifterror %0)
 
 /*****************************************************************************/
 /********************************** LOWERING *********************************/
@@ -100,39 +100,35 @@ TEST(struct_1);
 // CHECK-LABEL: define dso_local swiftcc { i64, i64 } @return_struct_1() {{.*}}{
 // CHECK:   [[RET:%.*]] = alloca [[STRUCT1:%.*]], align 4
 // CHECK:   call void @llvm.memset
-// CHECK:   [[CAST:%.*]] = bitcast [[STRUCT1]]* %retval to { i64, i64 }*
-// CHECK:   [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 0
-// CHECK:   [[T0:%.*]] = load i64, i64* [[GEP0]], align 4
-// CHECK:   [[GEP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 1
-// CHECK:   [[T1:%.*]] = load i64, i64* [[GEP1]], align 4
+// CHECK:   [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, ptr %retval, i32 0, i32 0
+// CHECK:   [[T0:%.*]] = load i64, ptr [[GEP0]], align 4
+// CHECK:   [[GEP1:%.*]] = getelementptr inbounds { i64, i64 }, ptr %retval, i32 0, i32 1
+// CHECK:   [[T1:%.*]] = load i64, ptr [[GEP1]], align 4
 // CHECK:   [[R0:%.*]] = insertvalue { i64, i64 } poison, i64 [[T0]], 0
 // CHECK:   [[R1:%.*]] = insertvalue { i64, i64 } [[R0]], i64 [[T1]], 1
 // CHECK:   ret { i64, i64 } [[R1]]
 // CHECK: }
 // CHECK-LABEL: define dso_local swiftcc void @take_struct_1(i64 %0, i64 %1) {{.*}}{
 // CHECK:   [[V:%.*]] = alloca [[STRUCT1:%.*]], align 4
-// CHECK:   [[CAST:%.*]] = bitcast [[STRUCT1]]* [[V]] to { i64, i64 }*
-// CHECK:   [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 0
-// CHECK:   store i64 %0, i64* [[GEP0]], align 4
-// CHECK:   [[GEP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 1
-// CHECK:   store i64 %1, i64* [[GEP1]], align 4
+// CHECK:   [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[V]], i32 0, i32 0
+// CHECK:   store i64 %0, ptr [[GEP0]], align 4
+// CHECK:   [[GEP1:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[V]], i32 0, i32 1
+// CHECK:   store i64 %1, ptr [[GEP1]], align 4
 // CHECK:   ret void
 // CHECK: }
 // CHECK-LABEL: define dso_local void @test_struct_1() {{.*}}{
 // CHECK:   [[AGG:%.*]] = alloca [[STRUCT1:%.*]], align 4
 // CHECK:   [[RET:%.*]] = call swiftcc { i64, i64 } @return_struct_1()
-// CHECK:   [[CAST:%.*]] = bitcast [[STRUCT1]]* [[AGG]] to { i64, i64 }*
-// CHECK:   [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 0
+// CHECK:   [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[AGG]], i32 0, i32 0
 // CHECK:   [[E0:%.*]] = extractvalue { i64, i64 } [[RET]], 0
-// CHECK:   store i64 [[E0]], i64* [[GEP0]], align 4
-// CHECK:   [[GEP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 1
+// CHECK:   store i64 [[E0]], ptr [[GEP0]], align 4
+// CHECK:   [[GEP1:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[AGG]], i32 0, i32 1
 // CHECK:   [[E1:%.*]] = extractvalue { i64, i64 } [[RET]], 1
-// CHECK:   store i64 [[E1]], i64* [[GEP1]], align 4
-// CHECK:   [[CAST2:%.*]] = bitcast [[STRUCT1]]* [[AGG]] to { i64, i64 }*
-// CHECK:   [[GEP2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST2]], i32 0, i32 0
-// CHECK:   [[V0:%.*]] = load i64, i64* [[GEP2]], align 4
-// CHECK:   [[GEP3:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST2]], i32 0, i32 1
-// CHECK:   [[V1:%.*]] = load i64, i64* [[GEP3]], align 4
+// CHECK:   store i64 [[E1]], ptr [[GEP1]], align 4
+// CHECK:   [[GEP2:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[AGG]], i32 0, i32 0
+// CHECK:   [[V0:%.*]] = load i64, ptr [[GEP2]], align 4
+// CHECK:   [[GEP3:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[AGG]], i32 0, i32 1
+// CHECK:   [[V1:%.*]] = load i64, ptr [[GEP3]], align 4
 // CHECK:   call swiftcc void @take_struct_1(i64 [[V0]], i64 [[V1]])
 // CHECK:   ret void
 // CHECK: }
@@ -147,41 +143,36 @@ typedef struct {
 TEST(struct_2);
 // CHECK-LABEL: define dso_local swiftcc { i64, i64 } @return_struct_2() {{.*}}{
 // CHECK:   [[RET:%.*]] = alloca [[STRUCT2_TYPE]], align 4
-// CHECK:   [[CASTVAR:%.*]] = bitcast {{.*}} [[RET]]
-// CHECK:   call void @llvm.memcpy{{.*}}({{.*}}[[CASTVAR]], {{.*}}[[STRUCT2_RESULT]]
-// CHECK:   [[CAST:%.*]] = bitcast [[STRUCT2_TYPE]]* [[RET]] to { i64, i64 }*
-// CHECK:   [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 0
-// CHECK:   [[T0:%.*]] = load i64, i64* [[GEP0]], align 4
-// CHECK:   [[GEP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 1
-// CHECK:   [[T1:%.*]] = load i64, i64* [[GEP1]], align 4
+// CHECK:   call void @llvm.memcpy{{.*}}({{.*}}[[RET]], {{.*}}[[STRUCT2_RESULT]]
+// CHECK:   [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[RET]], i32 0, i32 0
+// CHECK:   [[T0:%.*]] = load i64, ptr [[GEP0]], align 4
+// CHECK:   [[GEP1:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[RET]], i32 0, i32 1
+// CHECK:   [[T1:%.*]] = load i64, ptr [[GEP1]], align 4
 // CHECK:   [[R0:%.*]] = insertvalue { i64, i64 } poison, i64 [[T0]], 0
 // CHECK:   [[R1:%.*]] = insertvalue { i64, i64 } [[R0]], i64 [[T1]], 1
 // CHECK:   ret { i64, i64 } [[R1]]
 // CHECK: }
 // CHECK-LABEL: define dso_local swiftcc void @take_struct_2(i64 %0, i64 %1) {{.*}}{
 // CHECK:   [[V:%.*]] = alloca [[STRUCT:%.*]], align 4
-// CHECK:   [[CAST:%.*]] = bitcast [[STRUCT]]* [[V]] to { i64, i64 }*
-// CHECK:   [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 0
-// CHECK:   store i64 %0, i64* [[GEP0]], align 4
-// CHECK:   [[GEP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 1
-// CHECK:   store i64 %1, i64* [[GEP1]], align 4
+// CHECK:   [[GEP0:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[V]], i32 0, i32 0
+// CHECK:   store i64 %0, ptr [[GEP0]], align 4
+// CHECK:   [[GEP1:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[V]], i32 0, i32 1
+// CHECK:   store i64 %1, ptr [[GEP1]], align 4
 // CHECK:   ret void
 // CHECK: }
 // CHECK-LABEL: define dso_local void @test_struct_2() {{.*}} {
 // CHECK:   [[TMP:%.*]] = alloca [[STRUCT2_TYPE]], align 4
 // CHECK:   [[CALL:%.*]] = call swiftcc { i64, i64 } @return_struct_2()
-// CHECK:   [[CAST_TMP:%.*]] = bitcast [[STRUCT2_TYPE]]* [[TMP]] to { i64, i64 }*
-// CHECK:   [[GEP:%.*]] = getelementptr inbounds {{.*}} [[CAST_TMP]], i32 0, i32 0
+// CHECK:   [[GEP:%.*]] = getelementptr inbounds {{.*}} [[TMP]], i32 0, i32 0
 // CHECK:   [[T0:%.*]] = extractvalue { i64, i64 } [[CALL]], 0
-// CHECK:   store i64 [[T0]], i64* [[GEP]], align 4
-// CHECK:   [[GEP:%.*]] = getelementptr inbounds {{.*}} [[CAST_TMP]], i32 0, i32 1
+// CHECK:   store i64 [[T0]], ptr [[GEP]], align 4
+// CHECK:   [[GEP:%.*]] = getelementptr inbounds {{.*}} [[TMP]], i32 0, i32 1
 // CHECK:   [[T0:%.*]] = extractvalue { i64, i64 } [[CALL]], 1
-// CHECK:   store i64 [[T0]], i64* [[GEP]], align 4
-// CHECK:   [[CAST:%.*]] = bitcast [[STRUCT2_TYPE]]* [[TMP]] to { i64, i64 }*
-// CHECK:   [[GEP:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 0
-// CHECK:   [[R0:%.*]] = load i64, i64* [[GEP]], align 4
-// CHECK:   [[GEP:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[CAST]], i32 0, i32 1
-// CHECK:   [[R1:%.*]] = load i64, i64* [[GEP]], align 4
+// CHECK:   store i64 [[T0]], ptr [[GEP]], align 4
+// CHECK:   [[GEP:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[TMP]], i32 0, i32 0
+// CHECK:   [[R0:%.*]] = load i64, ptr [[GEP]], align 4
+// CHECK:   [[GEP:%.*]] = getelementptr inbounds { i64, i64 }, ptr [[TMP]], i32 0, i32 1
+// CHECK:   [[R1:%.*]] = load i64, ptr [[GEP]], align 4
 // CHECK:   call swiftcc void @take_struct_2(i64 [[R0]], i64 [[R1]])
 // CHECK:   ret void
 // CHECK: }
@@ -199,29 +190,24 @@ typedef struct {
 TEST(struct_misaligned_1)
 // CHECK-LABEL: define dso_local swiftcc i64 @return_struct_misaligned_1()
 // CHECK:  [[RET:%.*]] = alloca [[STRUCT:%.*]], align 1
-// CHECK:  [[CAST:%.*]] = bitcast [[STRUCT]]* [[RET]] to i8*
-// CHECK:  call void @llvm.memset{{.*}}(i8* align 1 [[CAST]], i8 0, i64 5
-// CHECK:  [[CAST:%.*]] = bitcast [[STRUCT]]* [[RET]] to { i64 }*
-// CHECK:  [[GEP:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[CAST]], i32 0, i32 0
-// CHECK:  [[R0:%.*]] = load i64, i64* [[GEP]], align 1
+// CHECK:  call void @llvm.memset{{.*}}(ptr align 1 [[RET]], i8 0, i64 5
+// CHECK:  [[GEP:%.*]] = getelementptr inbounds { i64 }, ptr [[RET]], i32 0, i32 0
+// CHECK:  [[R0:%.*]] = load i64, ptr [[GEP]], align 1
 // CHECK:  ret i64 [[R0]]
 // CHECK:}
 // CHECK-LABEL: define dso_local swiftcc void @take_struct_misaligned_1(i64 %0) {{.*}}{
 // CHECK:   [[V:%.*]] = alloca [[STRUCT:%.*]], align 1
-// CHECK:   [[CAST:%.*]] = bitcast [[STRUCT]]* [[V]] to { i64 }*
-// CHECK:   [[GEP:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[CAST]], i32 0, i32 0
-// CHECK:   store i64 %0, i64* [[GEP]], align 1
+// CHECK:   [[GEP:%.*]] = getelementptr inbounds { i64 }, ptr [[V]], i32 0, i32 0
+// CHECK:   store i64 %0, ptr [[GEP]], align 1
 // CHECK:   ret void
 // CHECK: }
 // CHECK: define dso_local void @test_struct_misaligned_1() {{.*}}{
 // CHECK:   [[AGG:%.*]] = alloca [[STRUCT:%.*]], align 1
 // CHECK:   [[CALL:%.*]] = call swiftcc i64 @return_struct_misaligned_1()
-// CHECK:   [[T0:%.*]] = bitcast [[STRUCT]]* [[AGG]] to { i64 }*
-// CHECK:   [[T1:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[T0]], i32 0, i32 0
-// CHECK:   store i64 [[CALL]], i64* [[T1]], align 1
-// CHECK:   [[T0:%.*]] = bitcast [[STRUCT]]* [[AGG]] to { i64 }*
-// CHECK:   [[T1:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[T0]], i32 0, i32 0
-// CHECK:   [[P:%.*]] = load i64, i64* [[T1]], align 1
+// CHECK:   [[T1:%.*]] = getelementptr inbounds { i64 }, ptr [[AGG]], i32 0, i32 0
+// CHECK:   store i64 [[CALL]], ptr [[T1]], align 1
+// CHECK:   [[T1:%.*]] = getelementptr inbounds { i64 }, ptr [[AGG]], i32 0, i32 0
+// CHECK:   [[P:%.*]] = load i64, ptr [[T1]], align 1
 // CHECK:   call swiftcc void @take_struct_misaligned_1(i64 [[P]])
 // CHECK:   ret void
 // CHECK: }
@@ -235,7 +221,7 @@ TEST(struct_big_1)
 // CHECK-LABEL: define {{.*}} void @return_struct_big_1({{.*}} noalias sret
 
 // Should not be byval.
-// CHECK-LABEL: define {{.*}} void @take_struct_big_1({{.*}}* noundef{{( %.*)?}})
+// CHECK-LABEL: define {{.*}} void @take_struct_big_1(ptr noundef{{( %.*)?}})
 
 /*****************************************************************************/
 /********************************* TYPE MERGING ******************************/
@@ -248,28 +234,23 @@ typedef union {
 TEST(union_het_fp)
 // CHECK-LABEL: define dso_local swiftcc i64 @return_union_het_fp()
 // CHECK:  [[RET:%.*]] = alloca [[UNION:%.*]], align 8
-// CHECK:  [[CAST:%.*]] = bitcast [[UNION]]* [[RET]] to i8*
-// CHECK:  call void @llvm.memcpy{{.*}}(i8* align {{[0-9]+}} [[CAST]]
-// CHECK:  [[CAST:%.*]] = bitcast [[UNION]]* [[RET]] to { i64 }*
-// CHECK:  [[GEP:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[CAST]], i32 0, i32 0
-// CHECK:  [[R0:%.*]] = load i64, i64* [[GEP]], align 8
+// CHECK:  call void @llvm.memcpy{{.*}}(ptr align {{[0-9]+}} [[RET]]
+// CHECK:  [[GEP:%.*]] = getelementptr inbounds { i64 }, ptr [[RET]], i32 0, i32 0
+// CHECK:  [[R0:%.*]] = load i64, ptr [[GEP]], align 8
 // CHECK:  ret i64 [[R0]]
 // CHECK-LABEL: define dso_local swiftcc void @take_union_het_fp(i64 %0) {{.*}}{
 // CHECK:   [[V:%.*]] = alloca [[UNION:%.*]], align 8
-// CHECK:   [[CAST:%.*]] = bitcast [[UNION]]* [[V]] to { i64 }*
-// CHECK:   [[GEP:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[CAST]], i32 0, i32 0
-// CHECK:   store i64 %0, i64* [[GEP]], align 8
+// CHECK:   [[GEP:%.*]] = getelementptr inbounds { i64 }, ptr [[V]], i32 0, i32 0
+// CHECK:   store i64 %0, ptr [[GEP]], align 8
 // CHECK:   ret void
 // CHECK: }
 // CHECK-LABEL: define dso_local void @test_union_het_fp() {{.*}}{
 // CHECK:   [[AGG:%.*]] = alloca [[UNION:%.*]], align 8
 // CHECK:   [[CALL:%.*]] = call swiftcc i64 @return_union_het_fp()
-// CHECK:   [[T0:%.*]] = bitcast [[UNION]]* [[AGG]] to { i64 }*
-// CHECK:   [[T1:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[T0]], i32 0, i32 0
-// CHECK:   store i64 [[CALL]], i64* [[T1]], align 8
-// CHECK:   [[T0:%.*]] = bitcast [[UNION]]* [[AGG]] to { i64 }*
-// CHECK:   [[T1:%.*]] = getelementptr inbounds { i64 }, { i64 }* [[T0]], i32 0, i32 0
-// CHECK:   [[V0:%.*]] = load i64, i64* [[T1]], align 8
+// CHECK:   [[T1:%.*]] = getelementptr inbounds { i64 }, ptr [[AGG]], i32 0, i32 0
+// CHECK:   store i64 [[CALL]], ptr [[T1]], align 8
+// CHECK:   [[T1:%.*]] = getelementptr inbounds { i64 }, ptr [[AGG]], i32 0, i32 0
+// CHECK:   [[V0:%.*]] = load i64, ptr [[T1]], align 8
 // CHECK:   call swiftcc void @take_union_het_fp(i64 [[V0]])
 // CHECK:   ret void
 // CHECK: }
@@ -283,12 +264,10 @@ TEST(union_hom_fp)
 // CHECK-LABEL: define dso_local void @test_union_hom_fp()
 // CHECK:   [[TMP:%.*]] = alloca [[REC:%.*]], align 4
 // CHECK:   [[CALL:%.*]] = call [[SWIFTCC]] float @return_union_hom_fp()
-// CHECK:   [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG:{ float }]]*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
-// CHECK:   store float [[CALL]], float* [[T0]], align 4
-// CHECK:   [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP]] to [[AGG]]*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
-// CHECK:   [[FIRST:%.*]] = load float, float* [[T0]], align 4
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG:{ float }]], ptr [[TMP]], i32 0, i32 0
+// CHECK:   store float [[CALL]], ptr [[T0]], align 4
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[TMP]], i32 0, i32 0
+// CHECK:   [[FIRST:%.*]] = load float, ptr [[T0]], align 4
 // CHECK:   call [[SWIFTCC]] void @take_union_hom_fp(float [[FIRST]])
 // CHECK:   ret void
 
@@ -300,28 +279,26 @@ TEST(union_hom_fp_partial)
 // CHECK: define dso_local void @test_union_hom_fp_partial()
 // CHECK:   [[AGG:%.*]] = alloca [[UNION:%.*]], align 16
 // CHECK:   [[CALL:%.*]] = call swiftcc { float, float, float, float } @return_union_hom_fp_partial()
-// CHECK:   [[CAST:%.*]] = bitcast [[UNION]]* [[AGG]] to { float, float, float, float }*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, { float, float, float, float }* [[CAST]], i32 0, i32 0
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, ptr [[AGG]], i32 0, i32 0
 // CHECK:   [[T1:%.*]] = extractvalue { float, float, float, float } [[CALL]], 0
-// CHECK:   store float [[T1]], float* [[T0]], align 16
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, { float, float, float, float }* [[CAST]], i32 0, i32 1
+// CHECK:   store float [[T1]], ptr [[T0]], align 16
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, ptr [[AGG]], i32 0, i32 1
 // CHECK:   [[T1:%.*]] = extractvalue { float, float, float, float } [[CALL]], 1
-// CHECK:   store float [[T1]], float* [[T0]], align 4
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, { float, float, float, float }* [[CAST]], i32 0, i32 2
+// CHECK:   store float [[T1]], ptr [[T0]], align 4
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, ptr [[AGG]], i32 0, i32 2
 // CHECK:   [[T1:%.*]] = extractvalue { float, float, float, float } [[CALL]], 2
-// CHECK:   store float [[T1]], float* [[T0]], align 8
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, { float, float, float, float }* [[CAST]], i32 0, i32 3
+// CHECK:   store float [[T1]], ptr [[T0]], align 8
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, ptr [[AGG]], i32 0, i32 3
 // CHECK:   [[T1:%.*]] = extractvalue { float, float, float, float } [[CALL]], 3
-// CHECK:   store float [[T1]], float* [[T0]], align 4
-// CHECK:   [[CAST:%.*]] = bitcast [[UNION]]* [[AGG]] to { float, float, float, float }*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, { float, float, float, float }* [[CAST]], i32 0, i32 0
-// CHECK:   [[V0:%.*]] = load float, float* [[T0]], align 16
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, { float, float, float, float }* [[CAST]], i32 0, i32 1
-// CHECK:   [[V1:%.*]] = load float, float* [[T0]], align 4
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, { float, float, float, float }* [[CAST]], i32 0, i32 2
-// CHECK:   [[V2:%.*]] = load float, float* [[T0]], align 8
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, { float, float, float, float }* [[CAST]], i32 0, i32 3
-// CHECK:   [[V3:%.*]] = load float, float* [[T0]], align 4
+// CHECK:   store float [[T1]], ptr [[T0]], align 4
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, ptr [[AGG]], i32 0, i32 0
+// CHECK:   [[V0:%.*]] = load float, ptr [[T0]], align 16
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, ptr [[AGG]], i32 0, i32 1
+// CHECK:   [[V1:%.*]] = load float, ptr [[T0]], align 4
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, ptr [[AGG]], i32 0, i32 2
+// CHECK:   [[V2:%.*]] = load float, ptr [[T0]], align 8
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { float, float, float, float }, ptr [[AGG]], i32 0, i32 3
+// CHECK:   [[V3:%.*]] = load float, ptr [[T0]], align 4
 // CHECK:   call swiftcc void @take_union_hom_fp_partial(float [[V0]], float [[V1]], float [[V2]], float [[V3]])
 // CHECK:   ret void
 // CHECK: }
@@ -334,23 +311,21 @@ TEST(union_het_fpv_partial)
 // CHECK-LABEL: define dso_local void @test_union_het_fpv_partial()
 // CHECK:   [[AGG:%.*]] = alloca [[UNION:%.*]], align 16
 // CHECK:   [[CALL:%.*]] = call swiftcc { i64, float, float } @return_union_het_fpv_partial()
-// CHECK:   [[CAST:%.*]] = bitcast [[UNION]]* [[AGG]] to { i64, float, float }*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, { i64, float, float }* [[CAST]], i32 0, i32 0
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, ptr [[AGG]], i32 0, i32 0
 // CHECK:   [[T1:%.*]] = extractvalue { i64, float, float } [[CALL]], 0
-// CHECK:   store i64 [[T1]], i64* [[T0]], align 16
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, { i64, float, float }* [[CAST]], i32 0, i32 1
+// CHECK:   store i64 [[T1]], ptr [[T0]], align 16
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, ptr [[AGG]], i32 0, i32 1
 // CHECK:   [[T1:%.*]] = extractvalue { i64, float, float } [[CALL]], 1
-// CHECK:   store float [[T1]], float* [[T0]], align 8
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, { i64, float, float }* [[CAST]], i32 0, i32 2
+// CHECK:   store float [[T1]], ptr [[T0]], align 8
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, ptr [[AGG]], i32 0, i32 2
 // CHECK:   [[T1:%.*]] = extractvalue { i64, float, float } [[CALL]], 2
-// CHECK:   store float [[T1]], float* [[T0]], align 4
-// CHECK:   [[CAST:%.*]] = bitcast [[UNION]]* [[AGG]] to { i64, float, float }*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, { i64, float, float }* [[CAST]], i32 0, i32 0
-// CHECK:   [[V0:%.*]] = load i64, i64* [[T0]], align 16
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, { i64, float, float }* [[CAST]], i32 0, i32 1
-// CHECK:   [[V1:%.*]] = load float, float* [[T0]], align 8
-// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, { i64, float, float }* [[CAST]], i32 0, i32 2
-// CHECK:   [[V2:%.*]] = load float, float* [[T0]], align 4
+// CHECK:   store float [[T1]], ptr [[T0]], align 4
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, ptr [[AGG]], i32 0, i32 0
+// CHECK:   [[V0:%.*]] = load i64, ptr [[T0]], align 16
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, ptr [[AGG]], i32 0, i32 1
+// CHECK:   [[V1:%.*]] = load float, ptr [[T0]], align 8
+// CHECK:   [[T0:%.*]] = getelementptr inbounds { i64, float, float }, ptr [[AGG]], i32 0, i32 2
+// CHECK:   [[V2:%.*]] = load float, ptr [[T0]], align 4
 // CHECK:   call swiftcc void @take_union_het_fpv_partial(i64 [[V0]], float [[V1]], float [[V2]])
 // CHECK:   ret void
 // CHECK: }
@@ -370,40 +345,36 @@ TEST(int8)
 // CHECK:   store
 // CHECK:   load
 // CHECK:   store
-// CHECK:   [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ <4 x i32>, <4 x i32> }]]*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
-// CHECK:   [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
-// CHECK:   [[SECOND:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG:{ <4 x i32>, <4 x i32> }]], ptr [[RET]], i32 0, i32 0
+// CHECK:   [[FIRST:%.*]] = load <4 x i32>, ptr [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[RET]], i32 0, i32 1
+// CHECK:   [[SECOND:%.*]] = load <4 x i32>, ptr [[T0]], align
 // CHECK:   [[T0:%.*]] = insertvalue [[UAGG:{ <4 x i32>, <4 x i32> }]] poison, <4 x i32> [[FIRST]], 0
 // CHECK:   [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], <4 x i32> [[SECOND]], 1
 // CHECK:   ret [[UAGG]] [[T1]]
 // CHECK-LABEL: define {{.*}} @take_int8(<4 x i32> noundef %0, <4 x i32> noundef %1)
 // CHECK:   [[V:%.*]] = alloca [[REC]], align
-// CHECK:   [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
-// CHECK:   store <4 x i32> %0, <4 x i32>* [[T0]], align
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
-// CHECK:   store <4 x i32> %1, <4 x i32>* [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[V]], i32 0, i32 0
+// CHECK:   store <4 x i32> %0, ptr [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[V]], i32 0, i32 1
+// CHECK:   store <4 x i32> %1, ptr [[T0]], align
 // CHECK:   ret void
 // CHECK-LABEL: define dso_local void @test_int8()
 // CHECK:   [[TMP1:%.*]] = alloca [[REC]], align
 // CHECK:   [[TMP2:%.*]] = alloca [[REC]], align
 // CHECK:   [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_int8()
-// CHECK:   [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP1]] to [[AGG]]*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[TMP1]], i32 0, i32 0
 // CHECK:   [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0
-// CHECK:   store <4 x i32> [[T1]], <4 x i32>* [[T0]], align
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
+// CHECK:   store <4 x i32> [[T1]], ptr [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[TMP1]], i32 0, i32 1
 // CHECK:   [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1
-// CHECK:   store <4 x i32> [[T1]], <4 x i32>* [[T0]], align
-// CHECK:   [[V:%.*]] = load [[REC]], [[REC]]* [[TMP1]], align
-// CHECK:   store [[REC]] [[V]], [[REC]]* [[TMP2]], align
-// CHECK:   [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP2]] to [[AGG]]*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
-// CHECK:   [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
-// CHECK:   [[SECOND:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
+// CHECK:   store <4 x i32> [[T1]], ptr [[T0]], align
+// CHECK:   [[V:%.*]] = load [[REC]], ptr [[TMP1]], align
+// CHECK:   store [[REC]] [[V]], ptr [[TMP2]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[TMP2]], i32 0, i32 0
+// CHECK:   [[FIRST:%.*]] = load <4 x i32>, ptr [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[TMP2]], i32 0, i32 1
+// CHECK:   [[SECOND:%.*]] = load <4 x i32>, ptr [[T0]], align
 // CHECK:   call [[SWIFTCC]] void @take_int8(<4 x i32> noundef [[FIRST]], <4 x i32> noundef [[SECOND]])
 // CHECK:   ret void
 
@@ -414,40 +385,36 @@ TEST(int5)
 // CHECK:   store
 // CHECK:   load
 // CHECK:   store
-// CHECK:   [[CAST_TMP:%.*]] = bitcast [[REC]]* [[RET]] to [[AGG:{ <4 x i32>, i32 }]]*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
-// CHECK:   [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
-// CHECK:   [[SECOND:%.*]] = load i32, i32* [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG:{ <4 x i32>, i32 }]], ptr [[RET]], i32 0, i32 0
+// CHECK:   [[FIRST:%.*]] = load <4 x i32>, ptr [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[RET]], i32 0, i32 1
+// CHECK:   [[SECOND:%.*]] = load i32, ptr [[T0]], align
 // CHECK:   [[T0:%.*]] = insertvalue [[UAGG:{ <4 x i32>, i32 }]] poison, <4 x i32> [[FIRST]], 0
 // CHECK:   [[T1:%.*]] = insertvalue [[UAGG]] [[T0]], i32 [[SECOND]], 1
 // CHECK:   ret [[UAGG]] [[T1]]
 // CHECK-LABEL: define {{.*}} @take_int5(<4 x i32> %0, i32 %1)
 // CHECK:   [[V:%.*]] = alloca [[REC]], align
-// CHECK:   [[CAST_TMP:%.*]] = bitcast [[REC]]* [[V]] to [[AGG]]*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
-// CHECK:   store <4 x i32> %0, <4 x i32>* [[T0]], align
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
-// CHECK:   store i32 %1, i32* [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[V]], i32 0, i32 0
+// CHECK:   store <4 x i32> %0, ptr [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[V]], i32 0, i32 1
+// CHECK:   store i32 %1, ptr [[T0]], align
 // CHECK:   ret void
 // CHECK-LABEL: define dso_local void @test_int5()
 // CHECK:   [[TMP1:%.*]] = alloca [[REC]], align
 // CHECK:   [[TMP2:%.*]] = alloca [[REC]], align
 // CHECK:   [[CALL:%.*]] = call [[SWIFTCC]] [[UAGG]] @return_int5()
-// CHECK:   [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP1]] to [[AGG]]*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[TMP1]], i32 0, i32 0
 // CHECK:   [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 0
-// CHECK:   store <4 x i32> [[T1]], <4 x i32>* [[T0]], align
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
+// CHECK:   store <4 x i32> [[T1]], ptr [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[TMP1]], i32 0, i32 1
 // CHECK:   [[T1:%.*]] = extractvalue [[UAGG]] [[CALL]], 1
-// CHECK:   store i32 [[T1]], i32* [[T0]], align
-// CHECK:   [[V:%.*]] = load [[REC]], [[REC]]* [[TMP1]], align
-// CHECK:   store [[REC]] [[V]], [[REC]]* [[TMP2]], align
-// CHECK:   [[CAST_TMP:%.*]] = bitcast [[REC]]* [[TMP2]] to [[AGG]]*
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 0
-// CHECK:   [[FIRST:%.*]] = load <4 x i32>, <4 x i32>* [[T0]], align
-// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], [[AGG]]* [[CAST_TMP]], i32 0, i32 1
-// CHECK:   [[SECOND:%.*]] = load i32, i32* [[T0]], align
+// CHECK:   store i32 [[T1]], ptr [[T0]], align
+// CHECK:   [[V:%.*]] = load [[REC]], ptr [[TMP1]], align
+// CHECK:   store [[REC]] [[V]], ptr [[TMP2]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[TMP2]], i32 0, i32 0
+// CHECK:   [[FIRST:%.*]] = load <4 x i32>, ptr [[T0]], align
+// CHECK:   [[T0:%.*]] = getelementptr inbounds [[AGG]], ptr [[TMP2]], i32 0, i32 1
+// CHECK:   [[SECOND:%.*]] = load i32, ptr [[T0]], align
 // CHECK:   call [[SWIFTCC]] void @take_int5(<4 x i32> [[FIRST]], i32 [[SECOND]])
 // CHECK:   ret void
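
All of the hunks above follow the same mechanical shape, so a minimal
sketch may help when reading the conversion. Under typed pointers, a
coerced aggregate access first bitcast the alloca to the coercion type
and then GEP'd into it; under opaque pointers the bitcast disappears and
the GEP, load, and store operate directly on ptr operands. The function,
struct, and value names below are hypothetical, chosen only to
illustrate the pattern, and are not taken from the test:

    ; Before (typed pointers), assuming %v points at some struct whose
    ; coerced lowering is { i64 }:
    ;   %cast = bitcast %struct.S* %v to { i64 }*
    ;   %gep  = getelementptr inbounds { i64 }, { i64 }* %cast, i32 0, i32 0
    ;   store i64 %x, i64* %gep, align 8
    ;
    ; After (opaque pointers): no bitcast, and the GEP takes its layout
    ; from the literal struct type rather than from the pointee type.
    define void @store_first_field(ptr %v, i64 %x) {
      %gep = getelementptr inbounds { i64 }, ptr %v, i32 0, i32 0
      store i64 %x, ptr %gep, align 8
      ret void
    }

This is why most hunks simply delete a bitcast line and substitute ptr
for the typed pointer in the remaining CHECK lines: the access pattern
itself is unchanged, which is what makes the conversion NFC.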
 