[llvm] 7385624 - [WebAssembly] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits <llvm-commits@lists.llvm.org>
Mon Dec 19 04:08:17 PST 2022


Author: Nikita Popov
Date: 2022-12-19T13:07:59+01:00
New Revision: 73856247eef35f5336e485dc009842a5b991c421

URL: https://github.com/llvm/llvm-project/commit/73856247eef35f5336e485dc009842a5b991c421
DIFF: https://github.com/llvm/llvm-project/commit/73856247eef35f5336e485dc009842a5b991c421.diff

LOG: [WebAssembly] Convert some tests to opaque pointers (NFC)
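
Opaque pointers erase pointee types: every typed pointer such as i8* or
i32* is spelled plain "ptr", and pointer-to-pointer bitcast instructions
and constant expressions become no-ops that can simply be dropped. As a
minimal illustration, taken from the PR40172.ll hunk below:

    ; typed pointers
    %x8 = bitcast { i8, i8 }* %t to i8*
    store i8 %x9, i8* %x8, align 1

    ; opaque pointers
    store i8 %x9, ptr %t, align 1

The generated code is unchanged, so the conversion is NFC (no
functional change).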

Added: 
    

Modified: 
    llvm/test/CodeGen/WebAssembly/PR40172.ll
    llvm/test/CodeGen/WebAssembly/PR40267.ll
    llvm/test/CodeGen/WebAssembly/PR41149.ll
    llvm/test/CodeGen/WebAssembly/add-prototypes.ll
    llvm/test/CodeGen/WebAssembly/address-offsets.ll
    llvm/test/CodeGen/WebAssembly/aliases.ll
    llvm/test/CodeGen/WebAssembly/atomic-mem-consistency.ll
    llvm/test/CodeGen/WebAssembly/atomic-pic.ll
    llvm/test/CodeGen/WebAssembly/atomic-rmw.ll
    llvm/test/CodeGen/WebAssembly/bulk-memory.ll
    llvm/test/CodeGen/WebAssembly/bulk-memory64.ll
    llvm/test/CodeGen/WebAssembly/byval.ll
    llvm/test/CodeGen/WebAssembly/call-indirect.ll
    llvm/test/CodeGen/WebAssembly/call-pic.ll
    llvm/test/CodeGen/WebAssembly/call.ll
    llvm/test/CodeGen/WebAssembly/cfg-stackify-eh.ll
    llvm/test/CodeGen/WebAssembly/cfi.ll
    llvm/test/CodeGen/WebAssembly/clear-cache.ll
    llvm/test/CodeGen/WebAssembly/dead-vreg.ll
    llvm/test/CodeGen/WebAssembly/eh-lsda.ll
    llvm/test/CodeGen/WebAssembly/exception.ll
    llvm/test/CodeGen/WebAssembly/fast-isel-call-indirect64.ll
    llvm/test/CodeGen/WebAssembly/fast-isel-noreg.ll
    llvm/test/CodeGen/WebAssembly/fast-isel-pr47040.ll
    llvm/test/CodeGen/WebAssembly/fast-isel.ll
    llvm/test/CodeGen/WebAssembly/function-addr-offset.ll
    llvm/test/CodeGen/WebAssembly/function-bitcasts-varargs.ll
    llvm/test/CodeGen/WebAssembly/function-pointer64.ll
    llvm/test/CodeGen/WebAssembly/global-get.ll
    llvm/test/CodeGen/WebAssembly/global-set.ll
    llvm/test/CodeGen/WebAssembly/global_dtors.ll
    llvm/test/CodeGen/WebAssembly/globl.ll
    llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
    llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
    llvm/test/CodeGen/WebAssembly/indirect-import.ll
    llvm/test/CodeGen/WebAssembly/indirectbr.ll
    llvm/test/CodeGen/WebAssembly/inline-asm-m.ll
    llvm/test/CodeGen/WebAssembly/inline-asm-roundtrip.ll
    llvm/test/CodeGen/WebAssembly/inline-asm.ll
    llvm/test/CodeGen/WebAssembly/inlineasm-output-template.ll
    llvm/test/CodeGen/WebAssembly/ir-locals-stackid.ll
    llvm/test/CodeGen/WebAssembly/ir-locals.ll
    llvm/test/CodeGen/WebAssembly/irreducible-cfg-exceptions.ll
    llvm/test/CodeGen/WebAssembly/irreducible-cfg.ll
    llvm/test/CodeGen/WebAssembly/legalize.ll
    llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll
    llvm/test/CodeGen/WebAssembly/load-ext.ll
    llvm/test/CodeGen/WebAssembly/load-store-i1.ll
    llvm/test/CodeGen/WebAssembly/load-store-pic.ll
    llvm/test/CodeGen/WebAssembly/load-store-static.ll
    llvm/test/CodeGen/WebAssembly/load.ll
    llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll
    llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
    llvm/test/CodeGen/WebAssembly/lower-em-exceptions-allowed.ll
    llvm/test/CodeGen/WebAssembly/lower-em-exceptions-resume-only.ll
    llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
    llvm/test/CodeGen/WebAssembly/lower-em-sjlj-indirect-setjmp.ll
    llvm/test/CodeGen/WebAssembly/lower-em-sjlj-sret.ll
    llvm/test/CodeGen/WebAssembly/lower-global-dtors.ll
    llvm/test/CodeGen/WebAssembly/main-three-args.ll
    llvm/test/CodeGen/WebAssembly/main-with-args.ll
    llvm/test/CodeGen/WebAssembly/mem-intrinsics.ll
    llvm/test/CodeGen/WebAssembly/multivalue.ll
    llvm/test/CodeGen/WebAssembly/negative-base-reg.ll
    llvm/test/CodeGen/WebAssembly/null-streamer.ll
    llvm/test/CodeGen/WebAssembly/offset-atomics.ll
    llvm/test/CodeGen/WebAssembly/offset-fastisel.ll
    llvm/test/CodeGen/WebAssembly/offset-folding.ll
    llvm/test/CodeGen/WebAssembly/offset.ll
    llvm/test/CodeGen/WebAssembly/only-data.ll
    llvm/test/CodeGen/WebAssembly/pr47375.ll
    llvm/test/CodeGen/WebAssembly/pr51651.ll
    llvm/test/CodeGen/WebAssembly/pr58904.ll
    llvm/test/CodeGen/WebAssembly/reg-stackify.ll
    llvm/test/CodeGen/WebAssembly/return-address-emscripten.ll
    llvm/test/CodeGen/WebAssembly/return-address-unknown.ll
    llvm/test/CodeGen/WebAssembly/return-int32.ll
    llvm/test/CodeGen/WebAssembly/return-void.ll
    llvm/test/CodeGen/WebAssembly/returned.ll
    llvm/test/CodeGen/WebAssembly/simd-build-pair.ll
    llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll
    llvm/test/CodeGen/WebAssembly/simd-illegal-signext.ll
    llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
    llvm/test/CodeGen/WebAssembly/simd-load-promote-wide.ll
    llvm/test/CodeGen/WebAssembly/simd-load-splat.ll
    llvm/test/CodeGen/WebAssembly/simd-load-store-alignment.ll
    llvm/test/CodeGen/WebAssembly/simd-load-zero-offset.ll
    llvm/test/CodeGen/WebAssembly/simd-offset.ll
    llvm/test/CodeGen/WebAssembly/simd-simplify-demanded-vector-elts.ll
    llvm/test/CodeGen/WebAssembly/stack-alignment.ll
    llvm/test/CodeGen/WebAssembly/stack-protector.ll
    llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll
    llvm/test/CodeGen/WebAssembly/store-trunc.ll
    llvm/test/CodeGen/WebAssembly/store.ll
    llvm/test/CodeGen/WebAssembly/swiftcc.ll
    llvm/test/CodeGen/WebAssembly/switch-in-loop.ll
    llvm/test/CodeGen/WebAssembly/tailcall.ll
    llvm/test/CodeGen/WebAssembly/target-features.ll
    llvm/test/CodeGen/WebAssembly/tls-general-dynamic.ll
    llvm/test/CodeGen/WebAssembly/tls-local-exec.ll
    llvm/test/CodeGen/WebAssembly/umulo-i64.ll
    llvm/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll
    llvm/test/CodeGen/WebAssembly/userstack.ll
    llvm/test/CodeGen/WebAssembly/varargs.ll
    llvm/test/CodeGen/WebAssembly/vector-sdiv.ll
    llvm/test/CodeGen/WebAssembly/vtable.ll
    llvm/test/CodeGen/WebAssembly/wasm-eh-em-sjlj-error.ll
    llvm/test/CodeGen/WebAssembly/wasm-eh-sjlj-setjmp-within-catch.ll
    llvm/test/CodeGen/WebAssembly/weak.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/WebAssembly/PR40172.ll b/llvm/test/CodeGen/WebAssembly/PR40172.ll
index 752f3e4dfbb2..f409b99c0c3b 100644
--- a/llvm/test/CodeGen/WebAssembly/PR40172.ll
+++ b/llvm/test/CodeGen/WebAssembly/PR40172.ll
@@ -20,11 +20,10 @@ define void @test(i8 %byte) {
   %x5 = icmp eq i8 %x4, 1
   %x6 = and i8 %byte, 2
   %x7 = icmp eq i8 %x6, 2
-  %x8 = bitcast { i8, i8 }* %t to i8*
   %x9 = zext i1 %x5 to i8
-  store i8 %x9, i8* %x8, align 1
-  %x10 = getelementptr inbounds { i8, i8 }, { i8, i8 }* %t, i32 0, i32 1
+  store i8 %x9, ptr %t, align 1
+  %x10 = getelementptr inbounds { i8, i8 }, ptr %t, i32 0, i32 1
   %x11 = zext i1 %x7 to i8
-  store i8 %x11, i8* %x10, align 1
+  store i8 %x11, ptr %x10, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/PR40267.ll b/llvm/test/CodeGen/WebAssembly/PR40267.ll
index 12d2bcaf2aa3..96fb116eefd9 100644
--- a/llvm/test/CodeGen/WebAssembly/PR40267.ll
+++ b/llvm/test/CodeGen/WebAssembly/PR40267.ll
@@ -8,7 +8,7 @@
 target triple = "wasm32-unknown-unknown"
 
 define void @foo() {
-  %L6 = load i32, i32* undef
+  %L6 = load i32, ptr undef
   br label %BB1
 
 BB1:                                              ; preds = %BB1, %0
@@ -16,6 +16,6 @@ BB1:                                              ; preds = %BB1, %0
   %E1 = extractelement <4 x i32> %bj, i32 0
   %E23 = extractelement <4 x i32> zeroinitializer, i32 %E1
   %I33 = insertelement <4 x i32> undef, i32 %E23, i1 undef
-  store <4 x i32> %I33, <4 x i32>* undef
+  store <4 x i32> %I33, ptr undef
   br label %BB1
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/PR41149.ll b/llvm/test/CodeGen/WebAssembly/PR41149.ll
index d18bd9c4a3b8..a31fd5e83082 100644
--- a/llvm/test/CodeGen/WebAssembly/PR41149.ll
+++ b/llvm/test/CodeGen/WebAssembly/PR41149.ll
@@ -22,10 +22,10 @@ define void @mod() {
 ; CHECK-NEXT:    i32.sub
 ; CHECK-NEXT:    i32.store8 0
 ; CHECK-NEXT:    # fallthrough-return
-  %tmp = load <4 x i8>, <4 x i8>* undef
+  %tmp = load <4 x i8>, ptr undef
   %tmp2 = icmp slt <4 x i8> %tmp, zeroinitializer
   %tmp3 = sub <4 x i8> zeroinitializer, %tmp
   %tmp4 = select <4 x i1> %tmp2, <4 x i8> %tmp3, <4 x i8> %tmp
-  store <4 x i8> %tmp4, <4 x i8>* undef
+  store <4 x i8> %tmp4, ptr undef
   ret void
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/add-prototypes.ll b/llvm/test/CodeGen/WebAssembly/add-prototypes.ll
index 8730f2ab767e..8e2fedaaff00 100644
--- a/llvm/test/CodeGen/WebAssembly/add-prototypes.ll
+++ b/llvm/test/CodeGen/WebAssembly/add-prototypes.ll
@@ -3,46 +3,42 @@
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
-; CHECK: @foo_addr = global i64 (i32)* @foo, align 8
-@foo_addr = global i64 (i32)* bitcast (i64 (...)* @foo to i64 (i32)*), align 8
+; CHECK: @foo_addr = global ptr @foo, align 8
+@foo_addr = global ptr @foo, align 8
 
-; CHECK: @foo_addr_i8 = global i8* bitcast (i64 (i32)* @foo to i8*), align 8
-@foo_addr_i8 = global i8* bitcast (i64 (...)* @foo to i8*), align 8
+; CHECK: @foo_addr_i8 = global ptr @foo, align 8
+@foo_addr_i8 = global ptr @foo, align 8
 
 ; CHECK-LABEL: @call_foo
 ; CHECK: %call = call i64 @foo(i32 42)
 define void @call_foo(i32 %a) {
-  %call = call i64 bitcast (i64 (...)* @foo to i64 (i32)*)(i32 42)
+  %call = call i64 @foo(i32 42)
   ret void
 }
 
 ; CHECK-LABEL: @call_foo_ptr
-; CHECK: %1 = bitcast i64 (...)* bitcast (i64 (i32)* @foo to i64 (...)*) to i64 (i32)*
-; CHECK-NEXT: %call = call i64 %1(i32 43)
+; CHECK-NEXT: %call = call i64 @foo(i32 43)
 define i64 @call_foo_ptr(i32 %a) {
-  %1 = bitcast i64 (...)* @foo to i64 (i32)*
-  %call = call i64 (i32) %1(i32 43)
+  %call = call i64 (i32) @foo(i32 43)
   ret i64 %call
 }
 
 ; CHECK-LABEL: @to_intptr_inst
-; CHECK: %1 = bitcast i64 (...)* bitcast (i64 (i32)* @foo to i64 (...)*) to i8*
-; CHECK-NEXT: ret i8* %1
-define i8* @to_intptr_inst() {
-  %1 = bitcast i64 (...)* @foo to i8*
-  ret i8* %1
+; CHECK-NEXT: ret ptr @foo
+define ptr @to_intptr_inst() {
+  ret ptr @foo
 }
 
 ; CHECK-LABEL: @to_intptr_constexpr
-; CHECK: ret i8* bitcast (i64 (i32)* @foo to i8*)
-define i8* @to_intptr_constexpr() {
-  ret i8* bitcast (i64 (...)* @foo to i8*)
+; CHECK: ret ptr @foo
+define ptr @to_intptr_constexpr() {
+  ret ptr @foo
 }
 
 ; CHECK-LABEL: @null_compare
-; CHECK: br i1 icmp eq (i64 (...)* bitcast (i64 (i32)* @foo to i64 (...)*), i64 (...)* null), label %if.then, label %if.end
+; CHECK: br i1 icmp eq (ptr @foo, ptr null), label %if.then, label %if.end
 define i8 @null_compare() {
-  br i1 icmp eq (i64 (...)* @foo, i64 (...)* null), label %if.then, label %if.end
+  br i1 icmp eq (ptr @foo, ptr null), label %if.then, label %if.end
 if.then:
   ret i8 0
 if.end:
@@ -50,24 +46,24 @@ if.end:
 }
 
 ; CHECK-LABEL: @as_paramater
-; CHECK: call void @func_param(i64 (...)* bitcast (i64 (i32)* @foo to i64 (...)*))
+; CHECK: call void @func_param(ptr @foo)
 define void @as_paramater() {
-  call void @func_param(i64 (...)* @foo)
+  call void @func_param(ptr @foo)
   ret void
 }
 
 ; Check if a sret parameter works in a no-prototype function.
 ; CHECK-LABEL: @sret_param
-; CHECK: call void @make_struct_foo(%struct.foo* sret(%struct.foo) %foo)
+; CHECK: call void @make_struct_foo(ptr sret(%struct.foo) %foo)
 %struct.foo = type { i32, i32 }
-declare void @make_struct_foo(%struct.foo* sret(%struct.foo), ...) #1
+declare void @make_struct_foo(ptr sret(%struct.foo), ...) #1
 define void @sret_param() {
   %foo = alloca %struct.foo, align 4
-  call void bitcast (void (%struct.foo*, ...)* @make_struct_foo to void (%struct.foo*)*)(%struct.foo* sret(%struct.foo) %foo)
+  call void @make_struct_foo(ptr sret(%struct.foo) %foo)
   ret void
 }
 
-declare void @func_param(i64 (...)*)
+declare void @func_param(ptr)
 
 ; CHECK: declare void @func_not_called()
 declare void @func_not_called(...) #1

diff  --git a/llvm/test/CodeGen/WebAssembly/address-offsets.ll b/llvm/test/CodeGen/WebAssembly/address-offsets.ll
index 3ddc952d8528..442400b1c1ff 100644
--- a/llvm/test/CodeGen/WebAssembly/address-offsets.ll
+++ b/llvm/test/CodeGen/WebAssembly/address-offsets.ll
@@ -14,7 +14,7 @@ target triple = "wasm32-unknown-emscripten"
 ; PIC-NEXT:   i32.load  $push1=, 40($pop0){{$}}
 ; CHECK-NEXT: return    $pop1{{$}}
 define i32 @load_test0() {
-  %t = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @g, i32 0, i32 10), align 4
+  %t = load i32, ptr getelementptr inbounds ([0 x i32], ptr @g, i32 0, i32 10), align 4
   ret i32 %t
 }
 
@@ -24,7 +24,7 @@ define i32 @load_test0() {
 ; PIC-NEXT:   i32.load  $push1=, 40($pop0){{$}}
 ; CHECK-NEXT: return    $pop1{{$}}
 define i32 @load_test0_noinbounds() {
-  %t = load i32, i32* getelementptr ([0 x i32], [0 x i32]* @g, i32 0, i32 10), align 4
+  %t = load i32, ptr getelementptr ([0 x i32], ptr @g, i32 0, i32 10), align 4
   ret i32 %t
 }
 
@@ -40,8 +40,8 @@ define i32 @load_test0_noinbounds() {
 ; CHECK-NEX T: return    $pop2{{$}}
 define i32 @load_test1(i32 %n) {
   %add = add nsw i32 %n, 10
-  %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %add
-  %t = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %add
+  %t = load i32, ptr %arrayidx, align 4
   ret i32 %t
 }
 
@@ -53,8 +53,8 @@ define i32 @load_test1(i32 %n) {
 ; CHECK-NEX T: return    $pop2{{$}}
 define i32 @load_test2(i32 %n) {
   %add = add nsw i32 10, %n
-  %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %add
-  %t = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %add
+  %t = load i32, ptr %arrayidx, align 4
   ret i32 %t
 }
 
@@ -65,9 +65,9 @@ define i32 @load_test2(i32 %n) {
 ; CHECK-NEX T: i32.load  $push2=, g+40($pop1){{$}}
 ; CHECK-NEX T: return    $pop2{{$}}
 define i32 @load_test3(i32 %n) {
-  %add.ptr = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %n
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
-  %t = load i32, i32* %add.ptr1, align 4
+  %add.ptr = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %n
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 10
+  %t = load i32, ptr %add.ptr1, align 4
   ret i32 %t
 }
 
@@ -78,8 +78,8 @@ define i32 @load_test3(i32 %n) {
 ; CHECK-NEX T: i32.load  $push2=, g+40($pop1){{$}}
 ; CHECK-NEX T: return    $pop2{{$}}
 define i32 @load_test4(i32 %n) {
-  %add.ptr = getelementptr inbounds i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @g, i32 0, i32 10), i32 %n
-  %t = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr getelementptr inbounds ([0 x i32], ptr @g, i32 0, i32 10), i32 %n
+  %t = load i32, ptr %add.ptr, align 4
   ret i32 %t
 }
 
@@ -90,8 +90,8 @@ define i32 @load_test4(i32 %n) {
 ; CHECK-NEX T: i32.load  $push2=, g+40($pop1){{$}}
 ; CHECK-NEX T: return    $pop2{{$}}
 define i32 @load_test5(i32 %n) {
-  %add.ptr = getelementptr inbounds i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @g, i32 0, i32 10), i32 %n
-  %t = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr getelementptr inbounds ([0 x i32], ptr @g, i32 0, i32 10), i32 %n
+  %t = load i32, ptr %add.ptr, align 4
   ret i32 %t
 }
 
@@ -103,8 +103,8 @@ define i32 @load_test5(i32 %n) {
 ; CHECK-NEX T: return    $pop2{{$}}
 define i32 @load_test6(i32 %n) {
   %add = add nsw i32 %n, 10
-  %add.ptr = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %add
-  %t = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %add
+  %t = load i32, ptr %add.ptr, align 4
   ret i32 %t
 }
 
@@ -115,9 +115,9 @@ define i32 @load_test6(i32 %n) {
 ; CHECK-NEX T: i32.load  $push2=, g+40($pop1){{$}}
 ; CHECK-NEX T: return    $pop2{{$}}
 define i32 @load_test7(i32 %n) {
-  %add.ptr = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %n
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
-  %t = load i32, i32* %add.ptr1, align 4
+  %add.ptr = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %n
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 10
+  %t = load i32, ptr %add.ptr1, align 4
   ret i32 %t
 }
 
@@ -129,8 +129,8 @@ define i32 @load_test7(i32 %n) {
 ; CHECK-NEX T: return    $pop2{{$}}
 define i32 @load_test8(i32 %n) {
   %add = add nsw i32 10, %n
-  %add.ptr = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %add
-  %t = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %add
+  %t = load i32, ptr %add.ptr, align 4
   ret i32 %t
 }
 
@@ -143,7 +143,7 @@ define i32 @load_test8(i32 %n) {
 ; PIC-NEXT: i32.load   $push3=, 0($pop2)
 ; PIC-NEXT: return     $pop3{{$}}
 define i32 @load_test9() {
-  %t = load i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @g, i32 0, i32 1073741814), align 4
+  %t = load i32, ptr getelementptr inbounds ([0 x i32], ptr @g, i32 0, i32 1073741814), align 4
   ret i32 %t
 }
 
@@ -160,8 +160,8 @@ define i32 @load_test9() {
 ; PIC-NEXT:   return    $pop6{{$}}
 define i32 @load_test10(i32 %n) {
   %add = add nsw i32 %n, -10
-  %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %add
-  %t = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %add
+  %t = load i32, ptr %arrayidx, align 4
   ret i32 %t
 }
 
@@ -169,9 +169,9 @@ define i32 @load_test10(i32 %n) {
 ; CHECK-NEXT: .functype load_test11 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load  $push0=, 40($0){{$}}
 ; CHECK-NEXT: return    $pop0{{$}}
-define i32 @load_test11(i32* %p) {
-  %arrayidx = getelementptr inbounds i32, i32* %p, i32 10
-  %t = load i32, i32* %arrayidx, align 4
+define i32 @load_test11(ptr %p) {
+  %arrayidx = getelementptr inbounds i32, ptr %p, i32 10
+  %t = load i32, ptr %arrayidx, align 4
   ret i32 %t
 }
 
@@ -181,9 +181,9 @@ define i32 @load_test11(i32* %p) {
 ; CHECK-NEXT: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK-NEXT: i32.load  $push2=, 0($pop1){{$}}
 ; CHECK-NEXT: return    $pop2{{$}}
-define i32 @load_test11_noinbounds(i32* %p) {
-  %arrayidx = getelementptr i32, i32* %p, i32 10
-  %t = load i32, i32* %arrayidx, align 4
+define i32 @load_test11_noinbounds(ptr %p) {
+  %arrayidx = getelementptr i32, ptr %p, i32 10
+  %t = load i32, ptr %arrayidx, align 4
   ret i32 %t
 }
 
@@ -196,10 +196,10 @@ define i32 @load_test11_noinbounds(i32* %p) {
 ; CHECK-NEXT: i32.add   $push4=, $pop2, $pop3{{$}}
 ; CHECK-NEXT: i32.load  $push5=, 0($pop4){{$}}
 ; CHECK-NEXT: return    $pop5{{$}}
-define i32 @load_test12(i32* %p, i32 %n) {
+define i32 @load_test12(ptr %p, i32 %n) {
   %add = add nsw i32 %n, 10
-  %arrayidx = getelementptr inbounds i32, i32* %p, i32 %add
-  %t = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %p, i32 %add
+  %t = load i32, ptr %arrayidx, align 4
   ret i32 %t
 }
 
@@ -212,10 +212,10 @@ define i32 @load_test12(i32* %p, i32 %n) {
 ; CHECK-NEXT: i32.add   $push4=, $pop2, $pop3{{$}}
 ; CHECK-NEXT: i32.load  $push5=, 0($pop4){{$}}
 ; CHECK-NEXT: return    $pop5{{$}}
-define i32 @load_test13(i32* %p, i32 %n) {
+define i32 @load_test13(ptr %p, i32 %n) {
   %add = add nsw i32 10, %n
-  %arrayidx = getelementptr inbounds i32, i32* %p, i32 %add
-  %t = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %p, i32 %add
+  %t = load i32, ptr %arrayidx, align 4
   ret i32 %t
 }
 
@@ -226,10 +226,10 @@ define i32 @load_test13(i32* %p, i32 %n) {
 ; CHECK-NEXT: i32.add   $push2=, $0, $pop1{{$}}
 ; CHECK-NEXT: i32.load  $push3=, 40($pop2){{$}}
 ; CHECK-NEXT: return    $pop3{{$}}
-define i32 @load_test14(i32* %p, i32 %n) {
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %n
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
-  %t = load i32, i32* %add.ptr1, align 4
+define i32 @load_test14(ptr %p, i32 %n) {
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %n
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 10
+  %t = load i32, ptr %add.ptr1, align 4
   ret i32 %t
 }
 
@@ -242,10 +242,10 @@ define i32 @load_test14(i32* %p, i32 %n) {
 ; CHECK-NEXT: i32.add   $push4=, $pop2, $pop3{{$}}
 ; CHECK-NEXT: i32.load  $push5=, 0($pop4){{$}}
 ; CHECK-NEXT: return    $pop5{{$}}
-define i32 @load_test15(i32* %p, i32 %n) {
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 10
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %n
-  %t = load i32, i32* %add.ptr1, align 4
+define i32 @load_test15(ptr %p, i32 %n) {
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 10
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 %n
+  %t = load i32, ptr %add.ptr1, align 4
   ret i32 %t
 }
 
@@ -258,10 +258,10 @@ define i32 @load_test15(i32* %p, i32 %n) {
 ; CHECK-NEXT: i32.add   $push4=, $pop2, $pop3{{$}}
 ; CHECK-NEXT: i32.load  $push5=, 0($pop4){{$}}
 ; CHECK-NEXT: return    $pop5{{$}}
-define i32 @load_test16(i32* %p, i32 %n) {
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 10
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %n
-  %t = load i32, i32* %add.ptr1, align 4
+define i32 @load_test16(ptr %p, i32 %n) {
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 10
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 %n
+  %t = load i32, ptr %add.ptr1, align 4
   ret i32 %t
 }
 
@@ -274,10 +274,10 @@ define i32 @load_test16(i32* %p, i32 %n) {
 ; CHECK-NEXT: i32.add   $push4=, $pop2, $pop3{{$}}
 ; CHECK-NEXT: i32.load  $push5=, 0($pop4){{$}}
 ; CHECK-NEXT: return    $pop5{{$}}
-define i32 @load_test17(i32* %p, i32 %n) {
+define i32 @load_test17(ptr %p, i32 %n) {
   %add = add nsw i32 %n, 10
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %add
-  %t = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %add
+  %t = load i32, ptr %add.ptr, align 4
   ret i32 %t
 }
 
@@ -288,10 +288,10 @@ define i32 @load_test17(i32* %p, i32 %n) {
 ; CHECK-NEXT: i32.add   $push2=, $0, $pop1{{$}}
 ; CHECK-NEXT: i32.load  $push3=, 40($pop2){{$}}
 ; CHECK-NEXT: return    $pop3{{$}}
-define i32 @load_test18(i32* %p, i32 %n) {
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %n
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
-  %t = load i32, i32* %add.ptr1, align 4
+define i32 @load_test18(ptr %p, i32 %n) {
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %n
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 10
+  %t = load i32, ptr %add.ptr1, align 4
   ret i32 %t
 }
 
@@ -304,10 +304,10 @@ define i32 @load_test18(i32* %p, i32 %n) {
 ; CHECK-NEXT: i32.add   $push4=, $pop2, $pop3{{$}}
 ; CHECK-NEXT: i32.load  $push5=, 0($pop4){{$}}
 ; CHECK-NEXT: return    $pop5{{$}}
-define i32 @load_test19(i32* %p, i32 %n) {
+define i32 @load_test19(ptr %p, i32 %n) {
   %add = add nsw i32 10, %n
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %add
-  %t = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %add
+  %t = load i32, ptr %add.ptr, align 4
   ret i32 %t
 }
 
@@ -317,9 +317,9 @@ define i32 @load_test19(i32* %p, i32 %n) {
 ; CHECK-NEXT: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK-NEXT: i32.load  $push2=, 0($pop1){{$}}
 ; CHECK-NEXT: return    $pop2{{$}}
-define i32 @load_test20(i32* %p) {
-  %arrayidx = getelementptr inbounds i32, i32* %p, i32 -10
-  %t = load i32, i32* %arrayidx, align 4
+define i32 @load_test20(ptr %p) {
+  %arrayidx = getelementptr inbounds i32, ptr %p, i32 -10
+  %t = load i32, ptr %arrayidx, align 4
   ret i32 %t
 }
 
@@ -332,10 +332,10 @@ define i32 @load_test20(i32* %p) {
 ; CHECK-NEXT: i32.add   $push4=, $pop2, $pop3{{$}}
 ; CHECK-NEXT: i32.load  $push5=, 0($pop4){{$}}
 ; CHECK-NEXT: return    $pop5{{$}}
-define i32 @load_test21(i32* %p, i32 %n) {
+define i32 @load_test21(ptr %p, i32 %n) {
   %add = add nsw i32 %n, -10
-  %arrayidx = getelementptr inbounds i32, i32* %p, i32 %add
-  %t = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %p, i32 %add
+  %t = load i32, ptr %arrayidx, align 4
   ret i32 %t
 }
 
@@ -345,7 +345,7 @@ define i32 @load_test21(i32* %p, i32 %n) {
 ; PIC-NEXT:     i32.store 40($pop0), $0
 ; CHECK-NEXT:   return{{$}}
 define void @store_test0(i32 %i) {
-  store i32 %i, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @g, i32 0, i32 10), align 4
+  store i32 %i, ptr getelementptr inbounds ([0 x i32], ptr @g, i32 0, i32 10), align 4
   ret void
 }
 
@@ -355,7 +355,7 @@ define void @store_test0(i32 %i) {
 ; PIC-NEXT:     i32.store 40($pop0), $0{{$}}
 ; CHECK-NEXT:  return{{$}}
 define void @store_test0_noinbounds(i32 %i) {
-  store i32 %i, i32* getelementptr ([0 x i32], [0 x i32]* @g, i32 0, i32 10), align 4
+  store i32 %i, ptr getelementptr ([0 x i32], ptr @g, i32 0, i32 10), align 4
   ret void
 }
 
@@ -365,8 +365,8 @@ define void @store_test0_noinbounds(i32 %i) {
 ; CHECK-NEX T: return{{$}}
 define void @store_test1(i32 %n, i32 %i) {
   %add = add nsw i32 %n, 10
-  %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %add
-  store i32 %i, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %add
+  store i32 %i, ptr %arrayidx, align 4
   ret void
 }
 
@@ -376,8 +376,8 @@ define void @store_test1(i32 %n, i32 %i) {
 ; CHECK-NEX T: return{{$}}
 define void @store_test2(i32 %n, i32 %i) {
   %add = add nsw i32 10, %n
-  %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %add
-  store i32 %i, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %add
+  store i32 %i, ptr %arrayidx, align 4
   ret void
 }
 
@@ -386,9 +386,9 @@ define void @store_test2(i32 %n, i32 %i) {
 ; CHECK-NEX T: i32.store g+40($pop1), $1{{$}}
 ; CHECK-NEX T: return{{$}}
 define void @store_test3(i32 %n, i32 %i) {
-  %add.ptr = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %n
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
-  store i32 %i, i32* %add.ptr1, align 4
+  %add.ptr = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %n
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 10
+  store i32 %i, ptr %add.ptr1, align 4
   ret void
 }
 
@@ -397,8 +397,8 @@ define void @store_test3(i32 %n, i32 %i) {
 ; CHECK-NEX T: i32.store g+40($pop1), $1{{$}}
 ; CHECK-NEX T: return{{$}}
 define void @store_test4(i32 %n, i32 %i) {
-  %add.ptr = getelementptr inbounds i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @g, i32 0, i32 10), i32 %n
-  store i32 %i, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr getelementptr inbounds ([0 x i32], ptr @g, i32 0, i32 10), i32 %n
+  store i32 %i, ptr %add.ptr, align 4
   ret void
 }
 
@@ -407,8 +407,8 @@ define void @store_test4(i32 %n, i32 %i) {
 ; CHECK-NEX T: i32.store g+40($pop1), $1{{$}}
 ; CHECK-NEX T: return{{$}}
 define void @store_test5(i32 %n, i32 %i) {
-  %add.ptr = getelementptr inbounds i32, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @g, i32 0, i32 10), i32 %n
-  store i32 %i, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr getelementptr inbounds ([0 x i32], ptr @g, i32 0, i32 10), i32 %n
+  store i32 %i, ptr %add.ptr, align 4
   ret void
 }
 
@@ -418,8 +418,8 @@ define void @store_test5(i32 %n, i32 %i) {
 ; CHECK-NEX T: return{{$}}
 define void @store_test6(i32 %n, i32 %i) {
   %add = add nsw i32 %n, 10
-  %add.ptr = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %add
-  store i32 %i, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %add
+  store i32 %i, ptr %add.ptr, align 4
   ret void
 }
 
@@ -428,9 +428,9 @@ define void @store_test6(i32 %n, i32 %i) {
 ; CHECK-NEX T: i32.store g+40($pop1), $1{{$}}
 ; CHECK-NEX T: return{{$}}
 define void @store_test7(i32 %n, i32 %i) {
-  %add.ptr = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %n
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
-  store i32 %i, i32* %add.ptr1, align 4
+  %add.ptr = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %n
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 10
+  store i32 %i, ptr %add.ptr1, align 4
   ret void
 }
 
@@ -440,8 +440,8 @@ define void @store_test7(i32 %n, i32 %i) {
 ; CHECK-NEX T: return{{$}}
 define void @store_test8(i32 %n, i32 %i) {
   %add = add nsw i32 10, %n
-  %add.ptr = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %add
-  store i32 %i, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %add
+  store i32 %i, ptr %add.ptr, align 4
   ret void
 }
 
@@ -453,7 +453,7 @@ define void @store_test8(i32 %n, i32 %i) {
 ; PIC-NEXT:      i32.store  0($pop2), $0
 ; CHECK-NEXT:  return{{$}}
 define void @store_test9(i32 %i) {
-  store i32 %i, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @g, i32 0, i32 1073741814), align 4
+  store i32 %i, ptr getelementptr inbounds ([0 x i32], ptr @g, i32 0, i32 1073741814), align 4
   ret void
 }
 
@@ -469,8 +469,8 @@ define void @store_test9(i32 %i) {
 ; CHECK-NEXT:  return{{$}}
 define void @store_test10(i32 %n, i32 %i) {
   %add = add nsw i32 %n, -10
-  %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @g, i32 0, i32 %add
-  store i32 %i, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [0 x i32], ptr @g, i32 0, i32 %add
+  store i32 %i, ptr %arrayidx, align 4
   ret void
 }
 
@@ -478,9 +478,9 @@ define void @store_test10(i32 %n, i32 %i) {
 ; CHECK-NEXT: .functype store_test11 (i32, i32) -> (){{$}}
 ; CHECK-NEXT:  i32.store 40($0), $1{{$}}
 ; CHECK-NEXT:  return{{$}}
-define void @store_test11(i32* %p, i32 %i) {
-  %arrayidx = getelementptr inbounds i32, i32* %p, i32 10
-  store i32 %i, i32* %arrayidx, align 4
+define void @store_test11(ptr %p, i32 %i) {
+  %arrayidx = getelementptr inbounds i32, ptr %p, i32 10
+  store i32 %i, ptr %arrayidx, align 4
   ret void
 }
 
@@ -490,97 +490,97 @@ define void @store_test11(i32* %p, i32 %i) {
 ; CHECK-NEXT:  i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK-NEXT:  i32.store 0($pop1), $1{{$}}
 ; CHECK-NEXT:  return{{$}}
-define void @store_test11_noinbounds(i32* %p, i32 %i) {
-  %arrayidx = getelementptr i32, i32* %p, i32 10
-  store i32 %i, i32* %arrayidx, align 4
+define void @store_test11_noinbounds(ptr %p, i32 %i) {
+  %arrayidx = getelementptr i32, ptr %p, i32 10
+  store i32 %i, ptr %arrayidx, align 4
   ret void
 }
 
 ; CHECK-LABEL: store_test12:
 ; CHECK-NEXT: .functype store_test12 (i32, i32, i32) -> (){{$}}
-define void @store_test12(i32* %p, i32 %n, i32 %i) {
+define void @store_test12(ptr %p, i32 %n, i32 %i) {
   %add = add nsw i32 %n, 10
-  %arrayidx = getelementptr inbounds i32, i32* %p, i32 %add
-  store i32 %i, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %p, i32 %add
+  store i32 %i, ptr %arrayidx, align 4
   ret void
 }
 
 ; CHECK-LABEL: store_test13:
 ; CHECK-NEXT: .functype store_test13 (i32, i32, i32) -> (){{$}}
-define void @store_test13(i32* %p, i32 %n, i32 %i) {
+define void @store_test13(ptr %p, i32 %n, i32 %i) {
   %add = add nsw i32 10, %n
-  %arrayidx = getelementptr inbounds i32, i32* %p, i32 %add
-  store i32 %i, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %p, i32 %add
+  store i32 %i, ptr %arrayidx, align 4
   ret void
 }
 
 ; CHECK-LABEL: store_test14:
 ; CHECK-NEXT: .functype store_test14 (i32, i32, i32) -> (){{$}}
-define void @store_test14(i32* %p, i32 %n, i32 %i) {
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %n
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
-  store i32 %i, i32* %add.ptr1, align 4
+define void @store_test14(ptr %p, i32 %n, i32 %i) {
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %n
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 10
+  store i32 %i, ptr %add.ptr1, align 4
   ret void
 }
 
 ; CHECK-LABEL: store_test15:
 ; CHECK-NEXT: .functype store_test15 (i32, i32, i32) -> (){{$}}
-define void @store_test15(i32* %p, i32 %n, i32 %i) {
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 10
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %n
-  store i32 %i, i32* %add.ptr1, align 4
+define void @store_test15(ptr %p, i32 %n, i32 %i) {
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 10
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 %n
+  store i32 %i, ptr %add.ptr1, align 4
   ret void
 }
 
 ; CHECK-LABEL: store_test16:
 ; CHECK-NEXT: .functype store_test16 (i32, i32, i32) -> (){{$}}
-define void @store_test16(i32* %p, i32 %n, i32 %i) {
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 10
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %n
-  store i32 %i, i32* %add.ptr1, align 4
+define void @store_test16(ptr %p, i32 %n, i32 %i) {
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 10
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 %n
+  store i32 %i, ptr %add.ptr1, align 4
   ret void
 }
 
 ; CHECK-LABEL: store_test17:
 ; CHECK-NEXT: .functype store_test17 (i32, i32, i32) -> (){{$}}
-define void @store_test17(i32* %p, i32 %n, i32 %i) {
+define void @store_test17(ptr %p, i32 %n, i32 %i) {
   %add = add nsw i32 %n, 10
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %add
-  store i32 %i, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %add
+  store i32 %i, ptr %add.ptr, align 4
   ret void
 }
 
 ; CHECK-LABEL: store_test18:
 ; CHECK-NEXT: .functype store_test18 (i32, i32, i32) -> (){{$}}
-define void @store_test18(i32* %p, i32 %n, i32 %i) {
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %n
-  %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
-  store i32 %i, i32* %add.ptr1, align 4
+define void @store_test18(ptr %p, i32 %n, i32 %i) {
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %n
+  %add.ptr1 = getelementptr inbounds i32, ptr %add.ptr, i32 10
+  store i32 %i, ptr %add.ptr1, align 4
   ret void
 }
 
 ; CHECK-LABEL: store_test19:
 ; CHECK-NEXT: .functype store_test19 (i32, i32, i32) -> (){{$}}
-define void @store_test19(i32* %p, i32 %n, i32 %i) {
+define void @store_test19(ptr %p, i32 %n, i32 %i) {
   %add = add nsw i32 10, %n
-  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %add
-  store i32 %i, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i32 %add
+  store i32 %i, ptr %add.ptr, align 4
   ret void
 }
 
 ; CHECK-LABEL: store_test20:
 ; CHECK-NEXT: .functype store_test20 (i32, i32) -> (){{$}}
-define void @store_test20(i32* %p, i32 %i) {
-  %arrayidx = getelementptr inbounds i32, i32* %p, i32 -10
-  store i32 %i, i32* %arrayidx, align 4
+define void @store_test20(ptr %p, i32 %i) {
+  %arrayidx = getelementptr inbounds i32, ptr %p, i32 -10
+  store i32 %i, ptr %arrayidx, align 4
   ret void
 }
 
 ; CHECK-LABEL: store_test21:
 ; CHECK-NEXT: .functype store_test21 (i32, i32, i32) -> (){{$}}
-define void @store_test21(i32* %p, i32 %n, i32 %i) {
+define void @store_test21(ptr %p, i32 %n, i32 %i) {
   %add = add nsw i32 %n, -10
-  %arrayidx = getelementptr inbounds i32, i32* %p, i32 %add
-  store i32 %i, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %p, i32 %add
+  store i32 %i, ptr %arrayidx, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/aliases.ll b/llvm/test/CodeGen/WebAssembly/aliases.ll
index 45eacc899fe2..91b57b90df1d 100644
--- a/llvm/test/CodeGen/WebAssembly/aliases.ll
+++ b/llvm/test/CodeGen/WebAssembly/aliases.ll
@@ -5,11 +5,11 @@
 
 ; CHECK-DAG: .globl	foo1
 ; CHECK-DAG: .set foo1, bar
-@foo1 = alias i32, i32* @bar
+@foo1 = alias i32, ptr @bar
 
 ; CHECK-DAG: .globl	foo2
 ; CHECK-DAG: .set foo2, bar
-@foo2 = alias i32, i32* @bar
+@foo2 = alias i32, ptr @bar
 
 %FunTy = type i32()
 
@@ -20,47 +20,47 @@ define i32 @foo_f() {
 ; CHECK-DAG: .weak	bar_f
 ; CHECK-DAG: .type	bar_f, at function
 ; CHECK-DAG: .set bar_f, foo_f
-@bar_f = weak alias %FunTy, %FunTy* @foo_f
+@bar_f = weak alias %FunTy, ptr @foo_f
 
 ; CHECK-DAG: .weak	bar_l
 ; CHECK-DAG: .set bar_l, bar
-@bar_l = linkonce_odr alias i32, i32* @bar
+@bar_l = linkonce_odr alias i32, ptr @bar
 
 ; CHECK-DAG: .set bar_i, bar
-@bar_i = internal alias i32, i32* @bar
+@bar_i = internal alias i32, ptr @bar
 
 ; CHECK-DAG: .globl	A
-@A = alias i64, bitcast (i32* @bar to i64*)
+@A = alias i64, ptr @bar
 
 ; CHECK-DAG: .globl	bar_h
 ; CHECK-DAG: .hidden	bar_h
 ; CHECK-DAG: .set bar_h, bar
-@bar_h = hidden alias i32, i32* @bar
+@bar_h = hidden alias i32, ptr @bar
 
 ; CHECK-DAG: .globl	bar_p
 ; CHECK-DAG: .protected	bar_p
 ; CHECK-DAG: .set bar_p, bar
-@bar_p = protected alias i32, i32* @bar
+@bar_p = protected alias i32, ptr @bar
 
 ; CHECK-DAG: .set test2, bar+4
-@test2 = alias i32, getelementptr(i32, i32* @bar, i32 1)
+@test2 = alias i32, getelementptr(i32, ptr @bar, i32 1)
 
 ; CHECK-DAG: .set test3, 42
-@test3 = alias i32, inttoptr(i32 42 to i32*)
+@test3 = alias i32, inttoptr(i32 42 to ptr)
 
 ; CHECK-DAG: .set test4, bar
-@test4 = alias i32, inttoptr(i64 ptrtoint (i32* @bar to i64) to i32*)
+@test4 = alias i32, inttoptr(i64 ptrtoint (ptr @bar to i64) to ptr)
 
 ; CHECK-DAG: .set test5, test2-bar
-@test5 = alias i32, inttoptr(i32 sub (i32 ptrtoint (i32* @test2 to i32),
-                                 i32 ptrtoint (i32* @bar to i32)) to i32*)
+@test5 = alias i32, inttoptr(i32 sub (i32 ptrtoint (ptr @test2 to i32),
+                                 i32 ptrtoint (ptr @bar to i32)) to ptr)
 
 ; CHECK-DAG: .globl	test
 define i32 @test() {
 entry:
-   %tmp = load i32, i32* @foo1
-   %tmp1 = load i32, i32* @foo2
-   %tmp0 = load i32, i32* @bar_i
+   %tmp = load i32, ptr @foo1
+   %tmp1 = load i32, ptr @foo2
+   %tmp0 = load i32, ptr @bar_i
    %tmp2 = call i32 @foo_f()
    %tmp3 = add i32 %tmp, %tmp2
    %tmp4 = call i32 @bar_f()

diff  --git a/llvm/test/CodeGen/WebAssembly/atomic-mem-consistency.ll b/llvm/test/CodeGen/WebAssembly/atomic-mem-consistency.ll
index ed2fc6fcf844..5e9a0060c6ec 100644
--- a/llvm/test/CodeGen/WebAssembly/atomic-mem-consistency.ll
+++ b/llvm/test/CodeGen/WebAssembly/atomic-mem-consistency.ll
@@ -16,32 +16,32 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-LABEL: load_i32_unordered:
 ; CHECK: i32.atomic.load $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @load_i32_unordered(i32 *%p) {
-  %v = load atomic i32, i32* %p unordered, align 4
+define i32 @load_i32_unordered(ptr %p) {
+  %v = load atomic i32, ptr %p unordered, align 4
   ret i32 %v
 }
 
 ; CHECK-LABEL: load_i32_monotonic:
 ; CHECK: i32.atomic.load $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @load_i32_monotonic(i32 *%p) {
-  %v = load atomic i32, i32* %p monotonic, align 4
+define i32 @load_i32_monotonic(ptr %p) {
+  %v = load atomic i32, ptr %p monotonic, align 4
   ret i32 %v
 }
 
 ; CHECK-LABEL: load_i32_acquire:
 ; CHECK: i32.atomic.load $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @load_i32_acquire(i32 *%p) {
-  %v = load atomic i32, i32* %p acquire, align 4
+define i32 @load_i32_acquire(ptr %p) {
+  %v = load atomic i32, ptr %p acquire, align 4
   ret i32 %v
 }
 
 ; CHECK-LABEL: load_i32_seq_cst:
 ; CHECK: i32.atomic.load $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @load_i32_seq_cst(i32 *%p) {
-  %v = load atomic i32, i32* %p seq_cst, align 4
+define i32 @load_i32_seq_cst(ptr %p) {
+  %v = load atomic i32, ptr %p seq_cst, align 4
   ret i32 %v
 }
 
@@ -55,8 +55,8 @@ define i32 @load_i32_seq_cst(i32 *%p) {
 ; CHECK-NEXT: .functype store_i32_unordered (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @store_i32_unordered(i32 *%p, i32 %v) {
-  store atomic i32 %v, i32* %p unordered, align 4
+define void @store_i32_unordered(ptr %p, i32 %v) {
+  store atomic i32 %v, ptr %p unordered, align 4
   ret void
 }
 
@@ -64,8 +64,8 @@ define void @store_i32_unordered(i32 *%p, i32 %v) {
 ; CHECK-NEXT: .functype store_i32_monotonic (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @store_i32_monotonic(i32 *%p, i32 %v) {
-  store atomic i32 %v, i32* %p monotonic, align 4
+define void @store_i32_monotonic(ptr %p, i32 %v) {
+  store atomic i32 %v, ptr %p monotonic, align 4
   ret void
 }
 
@@ -73,8 +73,8 @@ define void @store_i32_monotonic(i32 *%p, i32 %v) {
 ; CHECK-NEXT: .functype store_i32_release (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @store_i32_release(i32 *%p, i32 %v) {
-  store atomic i32 %v, i32* %p release, align 4
+define void @store_i32_release(ptr %p, i32 %v) {
+  store atomic i32 %v, ptr %p release, align 4
   ret void
 }
 
@@ -82,8 +82,8 @@ define void @store_i32_release(i32 *%p, i32 %v) {
 ; CHECK-NEXT: .functype store_i32_seq_cst (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @store_i32_seq_cst(i32 *%p, i32 %v) {
-  store atomic i32 %v, i32* %p seq_cst, align 4
+define void @store_i32_seq_cst(ptr %p, i32 %v) {
+  store atomic i32 %v, ptr %p seq_cst, align 4
   ret void
 }
 
@@ -98,8 +98,8 @@ define void @store_i32_seq_cst(i32 *%p, i32 %v) {
 ; CHECK-NEXT: .functype add_i32_monotonic (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.add $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @add_i32_monotonic(i32* %p, i32 %v) {
-  %old = atomicrmw add i32* %p, i32 %v monotonic
+define i32 @add_i32_monotonic(ptr %p, i32 %v) {
+  %old = atomicrmw add ptr %p, i32 %v monotonic
   ret i32 %old
 }
 
@@ -107,8 +107,8 @@ define i32 @add_i32_monotonic(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype add_i32_acquire (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.add $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @add_i32_acquire(i32* %p, i32 %v) {
-  %old = atomicrmw add i32* %p, i32 %v acquire
+define i32 @add_i32_acquire(ptr %p, i32 %v) {
+  %old = atomicrmw add ptr %p, i32 %v acquire
   ret i32 %old
 }
 
@@ -116,8 +116,8 @@ define i32 @add_i32_acquire(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype add_i32_release (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.add $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @add_i32_release(i32* %p, i32 %v) {
-  %old = atomicrmw add i32* %p, i32 %v release
+define i32 @add_i32_release(ptr %p, i32 %v) {
+  %old = atomicrmw add ptr %p, i32 %v release
   ret i32 %old
 }
 
@@ -125,8 +125,8 @@ define i32 @add_i32_release(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype add_i32_acq_rel (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.add $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @add_i32_acq_rel(i32* %p, i32 %v) {
-  %old = atomicrmw add i32* %p, i32 %v acq_rel
+define i32 @add_i32_acq_rel(ptr %p, i32 %v) {
+  %old = atomicrmw add ptr %p, i32 %v acq_rel
   ret i32 %old
 }
 
@@ -134,8 +134,8 @@ define i32 @add_i32_acq_rel(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype add_i32_seq_cst (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.add $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @add_i32_seq_cst(i32* %p, i32 %v) {
-  %old = atomicrmw add i32* %p, i32 %v seq_cst
+define i32 @add_i32_seq_cst(ptr %p, i32 %v) {
+  %old = atomicrmw add ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -149,8 +149,8 @@ define i32 @add_i32_seq_cst(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype cmpxchg_i32_monotonic_monotonic (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_monotonic_monotonic(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new monotonic monotonic
+define i32 @cmpxchg_i32_monotonic_monotonic(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new monotonic monotonic
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -159,8 +159,8 @@ define i32 @cmpxchg_i32_monotonic_monotonic(i32* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_i32_acquire_monotonic (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_acquire_monotonic(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new acquire monotonic
+define i32 @cmpxchg_i32_acquire_monotonic(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new acquire monotonic
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -169,8 +169,8 @@ define i32 @cmpxchg_i32_acquire_monotonic(i32* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_i32_release_monotonic (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_release_monotonic(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new release monotonic
+define i32 @cmpxchg_i32_release_monotonic(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new release monotonic
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -179,8 +179,8 @@ define i32 @cmpxchg_i32_release_monotonic(i32* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_i32_acq_rel_monotonic (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_acq_rel_monotonic(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new acq_rel monotonic
+define i32 @cmpxchg_i32_acq_rel_monotonic(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new acq_rel monotonic
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -189,8 +189,8 @@ define i32 @cmpxchg_i32_acq_rel_monotonic(i32* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_i32_seq_cst_monotonic (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_seq_cst_monotonic(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new seq_cst monotonic
+define i32 @cmpxchg_i32_seq_cst_monotonic(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new seq_cst monotonic
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -199,8 +199,8 @@ define i32 @cmpxchg_i32_seq_cst_monotonic(i32* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_i32_acquire_acquire (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_acquire_acquire(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new acquire acquire
+define i32 @cmpxchg_i32_acquire_acquire(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new acquire acquire
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -209,8 +209,8 @@ define i32 @cmpxchg_i32_acquire_acquire(i32* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_i32_release_acquire (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_release_acquire(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new release acquire
+define i32 @cmpxchg_i32_release_acquire(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new release acquire
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -219,8 +219,8 @@ define i32 @cmpxchg_i32_release_acquire(i32* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_i32_acq_rel_acquire (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_acq_rel_acquire(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new acq_rel acquire
+define i32 @cmpxchg_i32_acq_rel_acquire(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new acq_rel acquire
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -229,8 +229,8 @@ define i32 @cmpxchg_i32_acq_rel_acquire(i32* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_i32_seq_cst_acquire (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_seq_cst_acquire(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new seq_cst acquire
+define i32 @cmpxchg_i32_seq_cst_acquire(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new seq_cst acquire
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -239,8 +239,8 @@ define i32 @cmpxchg_i32_seq_cst_acquire(i32* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_i32_seq_cst_seq_cst (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_seq_cst_seq_cst(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new seq_cst seq_cst
+define i32 @cmpxchg_i32_seq_cst_seq_cst(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/atomic-pic.ll b/llvm/test/CodeGen/WebAssembly/atomic-pic.ll
index 8f2014873fbf..9443bcb8236d 100644
--- a/llvm/test/CodeGen/WebAssembly/atomic-pic.ll
+++ b/llvm/test/CodeGen/WebAssembly/atomic-pic.ll
@@ -16,7 +16,7 @@ define i32 @rmw_add_external_global() {
 ; CHECK-NEXT:    i32.const $push[[L1:[0-9]+]]=, 42{{$}}
 ; CHECK-NEXT:    i32.atomic.rmw.add $push[[L2:[0-9]+]]=, 0($pop[[L0]]), $pop[[L1]]{{$}}
 ; CHECK-NEXT:    end_function
-  %1 = atomicrmw add i32* @external_global, i32 42 seq_cst
+  %1 = atomicrmw add ptr @external_global, i32 42 seq_cst
   ret i32 %1
 }
 
@@ -28,6 +28,6 @@ define i32 @rmw_add_hidden_global() {
 ; CHECK-NEXT:    i32.const $push[[L3:[0-9]+]]=, 42{{$}}
 ; CHECK-NEXT:    i32.atomic.rmw.add $push[[L4:[0-9]+]]=, 0($pop[[L2]]), $pop[[L3]]{{$}}
 ; CHECK-NEXT:    end_function
-  %1 = atomicrmw add i32* @hidden_global, i32 42 seq_cst
+  %1 = atomicrmw add ptr @hidden_global, i32 42 seq_cst
   ret i32 %1
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/atomic-rmw.ll b/llvm/test/CodeGen/WebAssembly/atomic-rmw.ll
index f391f40d68ce..047147c74bde 100644
--- a/llvm/test/CodeGen/WebAssembly/atomic-rmw.ll
+++ b/llvm/test/CodeGen/WebAssembly/atomic-rmw.ll
@@ -13,8 +13,8 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-NEXT: .functype add_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.add $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @add_i32(i32* %p, i32 %v) {
-  %old = atomicrmw add i32* %p, i32 %v seq_cst
+define i32 @add_i32(ptr %p, i32 %v) {
+  %old = atomicrmw add ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -22,8 +22,8 @@ define i32 @add_i32(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype sub_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.sub $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @sub_i32(i32* %p, i32 %v) {
-  %old = atomicrmw sub i32* %p, i32 %v seq_cst
+define i32 @sub_i32(ptr %p, i32 %v) {
+  %old = atomicrmw sub ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -31,8 +31,8 @@ define i32 @sub_i32(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype and_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.and $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @and_i32(i32* %p, i32 %v) {
-  %old = atomicrmw and i32* %p, i32 %v seq_cst
+define i32 @and_i32(ptr %p, i32 %v) {
+  %old = atomicrmw and ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -40,8 +40,8 @@ define i32 @and_i32(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype or_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.or $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @or_i32(i32* %p, i32 %v) {
-  %old = atomicrmw or i32* %p, i32 %v seq_cst
+define i32 @or_i32(ptr %p, i32 %v) {
+  %old = atomicrmw or ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -49,8 +49,8 @@ define i32 @or_i32(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype xor_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.xor $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @xor_i32(i32* %p, i32 %v) {
-  %old = atomicrmw xor i32* %p, i32 %v seq_cst
+define i32 @xor_i32(ptr %p, i32 %v) {
+  %old = atomicrmw xor ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -58,8 +58,8 @@ define i32 @xor_i32(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype xchg_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.xchg $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @xchg_i32(i32* %p, i32 %v) {
-  %old = atomicrmw xchg i32* %p, i32 %v seq_cst
+define i32 @xchg_i32(ptr %p, i32 %v) {
+  %old = atomicrmw xchg ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -67,8 +67,8 @@ define i32 @xchg_i32(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype cmpxchg_i32_loaded_value (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_loaded_value(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new seq_cst seq_cst
+define i32 @cmpxchg_i32_loaded_value(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -78,8 +78,8 @@ define i32 @cmpxchg_i32_loaded_value(i32* %p, i32 %exp, i32 %new) {
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: i32.eq $push1=, $pop0, $1{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i1 @cmpxchg_i32_success(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new seq_cst seq_cst
+define i1 @cmpxchg_i32_success(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new seq_cst seq_cst
   %succ = extractvalue { i32, i1 } %pair, 1
   ret i1 %succ
 }
@@ -91,8 +91,8 @@ define i1 @cmpxchg_i32_success(i32* %p, i32 %exp, i32 %new) {
 ; CHECK: i32.atomic.rmw.cmpxchg
 ; CHECK: br_if 0
 ; CHECK: end_loop
-define i32 @nand_i32(i32* %p, i32 %v) {
-  %old = atomicrmw nand i32* %p, i32 %v seq_cst
+define i32 @nand_i32(ptr %p, i32 %v) {
+  %old = atomicrmw nand ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -101,8 +101,8 @@ define i32 @nand_i32(i32* %p, i32 %v) {
 ; CHECK: i32.atomic.rmw.cmpxchg
 ; CHECK: br_if 0
 ; CHECK: end_loop
-define i32 @max_i32(i32* %p, i32 %v) {
-  %old = atomicrmw max i32* %p, i32 %v seq_cst
+define i32 @max_i32(ptr %p, i32 %v) {
+  %old = atomicrmw max ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -111,8 +111,8 @@ define i32 @max_i32(i32* %p, i32 %v) {
 ; CHECK: i32.atomic.rmw.cmpxchg
 ; CHECK: br_if 0
 ; CHECK: end_loop
-define i32 @min_i32(i32* %p, i32 %v) {
-  %old = atomicrmw min i32* %p, i32 %v seq_cst
+define i32 @min_i32(ptr %p, i32 %v) {
+  %old = atomicrmw min ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -121,8 +121,8 @@ define i32 @min_i32(i32* %p, i32 %v) {
 ; CHECK: i32.atomic.rmw.cmpxchg
 ; CHECK: br_if 0
 ; CHECK: end_loop
-define i32 @umax_i32(i32* %p, i32 %v) {
-  %old = atomicrmw umax i32* %p, i32 %v seq_cst
+define i32 @umax_i32(ptr %p, i32 %v) {
+  %old = atomicrmw umax ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -131,8 +131,8 @@ define i32 @umax_i32(i32* %p, i32 %v) {
 ; CHECK: i32.atomic.rmw.cmpxchg
 ; CHECK: br_if 0
 ; CHECK: end_loop
-define i32 @umin_i32(i32* %p, i32 %v) {
-  %old = atomicrmw umin i32* %p, i32 %v seq_cst
+define i32 @umin_i32(ptr %p, i32 %v) {
+  %old = atomicrmw umin ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -144,8 +144,8 @@ define i32 @umin_i32(i32* %p, i32 %v) {
 ; CHECK-NEXT: .functype add_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw.add $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @add_i64(i64* %p, i64 %v) {
-  %old = atomicrmw add i64* %p, i64 %v seq_cst
+define i64 @add_i64(ptr %p, i64 %v) {
+  %old = atomicrmw add ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -153,8 +153,8 @@ define i64 @add_i64(i64* %p, i64 %v) {
 ; CHECK-NEXT: .functype sub_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw.sub $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @sub_i64(i64* %p, i64 %v) {
-  %old = atomicrmw sub i64* %p, i64 %v seq_cst
+define i64 @sub_i64(ptr %p, i64 %v) {
+  %old = atomicrmw sub ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -162,8 +162,8 @@ define i64 @sub_i64(i64* %p, i64 %v) {
 ; CHECK-NEXT: .functype and_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw.and $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @and_i64(i64* %p, i64 %v) {
-  %old = atomicrmw and i64* %p, i64 %v seq_cst
+define i64 @and_i64(ptr %p, i64 %v) {
+  %old = atomicrmw and ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -171,8 +171,8 @@ define i64 @and_i64(i64* %p, i64 %v) {
 ; CHECK-NEXT: .functype or_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw.or $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @or_i64(i64* %p, i64 %v) {
-  %old = atomicrmw or i64* %p, i64 %v seq_cst
+define i64 @or_i64(ptr %p, i64 %v) {
+  %old = atomicrmw or ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -180,8 +180,8 @@ define i64 @or_i64(i64* %p, i64 %v) {
 ; CHECK-NEXT: .functype xor_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw.xor $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @xor_i64(i64* %p, i64 %v) {
-  %old = atomicrmw xor i64* %p, i64 %v seq_cst
+define i64 @xor_i64(ptr %p, i64 %v) {
+  %old = atomicrmw xor ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -189,8 +189,8 @@ define i64 @xor_i64(i64* %p, i64 %v) {
 ; CHECK-NEXT: .functype xchg_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw.xchg $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @xchg_i64(i64* %p, i64 %v) {
-  %old = atomicrmw xchg i64* %p, i64 %v seq_cst
+define i64 @xchg_i64(ptr %p, i64 %v) {
+  %old = atomicrmw xchg ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -198,8 +198,8 @@ define i64 @xchg_i64(i64* %p, i64 %v) {
 ; CHECK-NEXT: .functype cmpxchg_i64_loaded_value (i32, i64, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @cmpxchg_i64_loaded_value(i64* %p, i64 %exp, i64 %new) {
-  %pair = cmpxchg i64* %p, i64 %exp, i64 %new seq_cst seq_cst
+define i64 @cmpxchg_i64_loaded_value(ptr %p, i64 %exp, i64 %new) {
+  %pair = cmpxchg ptr %p, i64 %exp, i64 %new seq_cst seq_cst
   %old = extractvalue { i64, i1 } %pair, 0
   ret i64 %old
 }
@@ -209,8 +209,8 @@ define i64 @cmpxchg_i64_loaded_value(i64* %p, i64 %exp, i64 %new) {
 ; CHECK: i64.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: i64.eq $push1=, $pop0, $1{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i1 @cmpxchg_i64_success(i64* %p, i64 %exp, i64 %new) {
-  %pair = cmpxchg i64* %p, i64 %exp, i64 %new seq_cst seq_cst
+define i1 @cmpxchg_i64_success(ptr %p, i64 %exp, i64 %new) {
+  %pair = cmpxchg ptr %p, i64 %exp, i64 %new seq_cst seq_cst
   %succ = extractvalue { i64, i1 } %pair, 1
   ret i1 %succ
 }
@@ -222,8 +222,8 @@ define i1 @cmpxchg_i64_success(i64* %p, i64 %exp, i64 %new) {
 ; CHECK: i64.atomic.rmw.cmpxchg
 ; CHECK: br_if 0
 ; CHECK: end_loop
-define i64 @nand_i64(i64* %p, i64 %v) {
-  %old = atomicrmw nand i64* %p, i64 %v seq_cst
+define i64 @nand_i64(ptr %p, i64 %v) {
+  %old = atomicrmw nand ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -232,8 +232,8 @@ define i64 @nand_i64(i64* %p, i64 %v) {
 ; CHECK: i64.atomic.rmw.cmpxchg
 ; CHECK: br_if 0
 ; CHECK: end_loop
-define i64 @max_i64(i64* %p, i64 %v) {
-  %old = atomicrmw max i64* %p, i64 %v seq_cst
+define i64 @max_i64(ptr %p, i64 %v) {
+  %old = atomicrmw max ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -242,8 +242,8 @@ define i64 @max_i64(i64* %p, i64 %v) {
 ; CHECK: i64.atomic.rmw.cmpxchg
 ; CHECK: br_if 0
 ; CHECK: end_loop
-define i64 @min_i64(i64* %p, i64 %v) {
-  %old = atomicrmw min i64* %p, i64 %v seq_cst
+define i64 @min_i64(ptr %p, i64 %v) {
+  %old = atomicrmw min ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -252,8 +252,8 @@ define i64 @min_i64(i64* %p, i64 %v) {
 ; CHECK: i64.atomic.rmw.cmpxchg
 ; CHECK: br_if 0
 ; CHECK: end_loop
-define i64 @umax_i64(i64* %p, i64 %v) {
-  %old = atomicrmw umax i64* %p, i64 %v seq_cst
+define i64 @umax_i64(ptr %p, i64 %v) {
+  %old = atomicrmw umax ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -262,8 +262,8 @@ define i64 @umax_i64(i64* %p, i64 %v) {
 ; CHECK: i64.atomic.rmw.cmpxchg
 ; CHECK: br_if 0
 ; CHECK: end_loop
-define i64 @umin_i64(i64* %p, i64 %v) {
-  %old = atomicrmw umin i64* %p, i64 %v seq_cst
+define i64 @umin_i64(ptr %p, i64 %v) {
+  %old = atomicrmw umin ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -278,9 +278,9 @@ define i64 @umin_i64(i64* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw8.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @add_sext_i8_i32(i8* %p, i32 %v) {
+define i32 @add_sext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw add i8* %p, i8 %t seq_cst
+  %old = atomicrmw add ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i32
   ret i32 %e
 }
@@ -290,9 +290,9 @@ define i32 @add_sext_i8_i32(i8* %p, i32 %v) {
 ; CHECK: i32.atomic.rmw16.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @add_sext_i16_i32(i16* %p, i32 %v) {
+define i32 @add_sext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw add i16* %p, i16 %t seq_cst
+  %old = atomicrmw add ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i32
   ret i32 %e
 }
@@ -302,9 +302,9 @@ define i32 @add_sext_i16_i32(i16* %p, i32 %v) {
 ; CHECK: i64.atomic.rmw8.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @add_sext_i8_i64(i8* %p, i64 %v) {
+define i64 @add_sext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw add i8* %p, i8 %t seq_cst
+  %old = atomicrmw add ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i64
   ret i64 %e
 }
@@ -314,9 +314,9 @@ define i64 @add_sext_i8_i64(i8* %p, i64 %v) {
 ; CHECK: i64.atomic.rmw16.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @add_sext_i16_i64(i16* %p, i64 %v) {
+define i64 @add_sext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw add i16* %p, i16 %t seq_cst
+  %old = atomicrmw add ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i64
   ret i64 %e
 }
@@ -328,9 +328,9 @@ define i64 @add_sext_i16_i64(i16* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw.add $push1=, 0($0), $pop0{{$}}
 ; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
-define i64 @add_sext_i32_i64(i32* %p, i64 %v) {
+define i64 @add_sext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw add i32* %p, i32 %t seq_cst
+  %old = atomicrmw add ptr %p, i32 %t seq_cst
   %e = sext i32 %old to i64
   ret i64 %e
 }
@@ -342,9 +342,9 @@ define i64 @add_sext_i32_i64(i32* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw8.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @sub_sext_i8_i32(i8* %p, i32 %v) {
+define i32 @sub_sext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw sub i8* %p, i8 %t seq_cst
+  %old = atomicrmw sub ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i32
   ret i32 %e
 }
@@ -354,9 +354,9 @@ define i32 @sub_sext_i8_i32(i8* %p, i32 %v) {
 ; CHECK: i32.atomic.rmw16.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @sub_sext_i16_i32(i16* %p, i32 %v) {
+define i32 @sub_sext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw sub i16* %p, i16 %t seq_cst
+  %old = atomicrmw sub ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i32
   ret i32 %e
 }
@@ -366,9 +366,9 @@ define i32 @sub_sext_i16_i32(i16* %p, i32 %v) {
 ; CHECK: i64.atomic.rmw8.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @sub_sext_i8_i64(i8* %p, i64 %v) {
+define i64 @sub_sext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw sub i8* %p, i8 %t seq_cst
+  %old = atomicrmw sub ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i64
   ret i64 %e
 }
@@ -378,9 +378,9 @@ define i64 @sub_sext_i8_i64(i8* %p, i64 %v) {
 ; CHECK: i64.atomic.rmw16.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @sub_sext_i16_i64(i16* %p, i64 %v) {
+define i64 @sub_sext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw sub i16* %p, i16 %t seq_cst
+  %old = atomicrmw sub ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i64
   ret i64 %e
 }
@@ -392,9 +392,9 @@ define i64 @sub_sext_i16_i64(i16* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw.sub $push1=, 0($0), $pop0{{$}}
 ; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
-define i64 @sub_sext_i32_i64(i32* %p, i64 %v) {
+define i64 @sub_sext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw sub i32* %p, i32 %t seq_cst
+  %old = atomicrmw sub ptr %p, i32 %t seq_cst
   %e = sext i32 %old to i64
   ret i64 %e
 }
@@ -406,9 +406,9 @@ define i64 @sub_sext_i32_i64(i32* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw8.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @and_sext_i8_i32(i8* %p, i32 %v) {
+define i32 @and_sext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw and i8* %p, i8 %t seq_cst
+  %old = atomicrmw and ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i32
   ret i32 %e
 }
@@ -418,9 +418,9 @@ define i32 @and_sext_i8_i32(i8* %p, i32 %v) {
 ; CHECK: i32.atomic.rmw16.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @and_sext_i16_i32(i16* %p, i32 %v) {
+define i32 @and_sext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw and i16* %p, i16 %t seq_cst
+  %old = atomicrmw and ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i32
   ret i32 %e
 }
@@ -430,9 +430,9 @@ define i32 @and_sext_i16_i32(i16* %p, i32 %v) {
 ; CHECK: i64.atomic.rmw8.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @and_sext_i8_i64(i8* %p, i64 %v) {
+define i64 @and_sext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw and i8* %p, i8 %t seq_cst
+  %old = atomicrmw and ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i64
   ret i64 %e
 }
@@ -442,9 +442,9 @@ define i64 @and_sext_i8_i64(i8* %p, i64 %v) {
 ; CHECK: i64.atomic.rmw16.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @and_sext_i16_i64(i16* %p, i64 %v) {
+define i64 @and_sext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw and i16* %p, i16 %t seq_cst
+  %old = atomicrmw and ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i64
   ret i64 %e
 }
@@ -456,9 +456,9 @@ define i64 @and_sext_i16_i64(i16* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw.and $push1=, 0($0), $pop0{{$}}
 ; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
-define i64 @and_sext_i32_i64(i32* %p, i64 %v) {
+define i64 @and_sext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw and i32* %p, i32 %t seq_cst
+  %old = atomicrmw and ptr %p, i32 %t seq_cst
   %e = sext i32 %old to i64
   ret i64 %e
 }
@@ -470,9 +470,9 @@ define i64 @and_sext_i32_i64(i32* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw8.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @or_sext_i8_i32(i8* %p, i32 %v) {
+define i32 @or_sext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw or i8* %p, i8 %t seq_cst
+  %old = atomicrmw or ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i32
   ret i32 %e
 }
@@ -482,9 +482,9 @@ define i32 @or_sext_i8_i32(i8* %p, i32 %v) {
 ; CHECK: i32.atomic.rmw16.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @or_sext_i16_i32(i16* %p, i32 %v) {
+define i32 @or_sext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw or i16* %p, i16 %t seq_cst
+  %old = atomicrmw or ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i32
   ret i32 %e
 }
@@ -494,9 +494,9 @@ define i32 @or_sext_i16_i32(i16* %p, i32 %v) {
 ; CHECK: i64.atomic.rmw8.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @or_sext_i8_i64(i8* %p, i64 %v) {
+define i64 @or_sext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw or i8* %p, i8 %t seq_cst
+  %old = atomicrmw or ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i64
   ret i64 %e
 }
@@ -506,9 +506,9 @@ define i64 @or_sext_i8_i64(i8* %p, i64 %v) {
 ; CHECK: i64.atomic.rmw16.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @or_sext_i16_i64(i16* %p, i64 %v) {
+define i64 @or_sext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw or i16* %p, i16 %t seq_cst
+  %old = atomicrmw or ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i64
   ret i64 %e
 }
@@ -520,9 +520,9 @@ define i64 @or_sext_i16_i64(i16* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw.or $push1=, 0($0), $pop0{{$}}
 ; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
-define i64 @or_sext_i32_i64(i32* %p, i64 %v) {
+define i64 @or_sext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw or i32* %p, i32 %t seq_cst
+  %old = atomicrmw or ptr %p, i32 %t seq_cst
   %e = sext i32 %old to i64
   ret i64 %e
 }
@@ -534,9 +534,9 @@ define i64 @or_sext_i32_i64(i32* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw8.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @xor_sext_i8_i32(i8* %p, i32 %v) {
+define i32 @xor_sext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw xor i8* %p, i8 %t seq_cst
+  %old = atomicrmw xor ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i32
   ret i32 %e
 }
@@ -546,9 +546,9 @@ define i32 @xor_sext_i8_i32(i8* %p, i32 %v) {
 ; CHECK: i32.atomic.rmw16.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @xor_sext_i16_i32(i16* %p, i32 %v) {
+define i32 @xor_sext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw xor i16* %p, i16 %t seq_cst
+  %old = atomicrmw xor ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i32
   ret i32 %e
 }
@@ -558,9 +558,9 @@ define i32 @xor_sext_i16_i32(i16* %p, i32 %v) {
 ; CHECK: i64.atomic.rmw8.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @xor_sext_i8_i64(i8* %p, i64 %v) {
+define i64 @xor_sext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw xor i8* %p, i8 %t seq_cst
+  %old = atomicrmw xor ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i64
   ret i64 %e
 }
@@ -570,9 +570,9 @@ define i64 @xor_sext_i8_i64(i8* %p, i64 %v) {
 ; CHECK: i64.atomic.rmw16.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @xor_sext_i16_i64(i16* %p, i64 %v) {
+define i64 @xor_sext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw xor i16* %p, i16 %t seq_cst
+  %old = atomicrmw xor ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i64
   ret i64 %e
 }
@@ -584,9 +584,9 @@ define i64 @xor_sext_i16_i64(i16* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw.xor $push1=, 0($0), $pop0{{$}}
 ; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
-define i64 @xor_sext_i32_i64(i32* %p, i64 %v) {
+define i64 @xor_sext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw xor i32* %p, i32 %t seq_cst
+  %old = atomicrmw xor ptr %p, i32 %t seq_cst
   %e = sext i32 %old to i64
   ret i64 %e
 }
@@ -598,9 +598,9 @@ define i64 @xor_sext_i32_i64(i32* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw8.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @xchg_sext_i8_i32(i8* %p, i32 %v) {
+define i32 @xchg_sext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw xchg i8* %p, i8 %t seq_cst
+  %old = atomicrmw xchg ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i32
   ret i32 %e
 }
@@ -610,9 +610,9 @@ define i32 @xchg_sext_i8_i32(i8* %p, i32 %v) {
 ; CHECK: i32.atomic.rmw16.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @xchg_sext_i16_i32(i16* %p, i32 %v) {
+define i32 @xchg_sext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw xchg i16* %p, i16 %t seq_cst
+  %old = atomicrmw xchg ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i32
   ret i32 %e
 }
@@ -622,9 +622,9 @@ define i32 @xchg_sext_i16_i32(i16* %p, i32 %v) {
 ; CHECK: i64.atomic.rmw8.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @xchg_sext_i8_i64(i8* %p, i64 %v) {
+define i64 @xchg_sext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw xchg i8* %p, i8 %t seq_cst
+  %old = atomicrmw xchg ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i64
   ret i64 %e
 }
@@ -634,9 +634,9 @@ define i64 @xchg_sext_i8_i64(i8* %p, i64 %v) {
 ; CHECK: i64.atomic.rmw16.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @xchg_sext_i16_i64(i16* %p, i64 %v) {
+define i64 @xchg_sext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw xchg i16* %p, i16 %t seq_cst
+  %old = atomicrmw xchg ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i64
   ret i64 %e
 }
@@ -648,9 +648,9 @@ define i64 @xchg_sext_i16_i64(i16* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw.xchg $push1=, 0($0), $pop0{{$}}
 ; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
 ; CHECK-NEXT: return $pop2{{$}}
-define i64 @xchg_sext_i32_i64(i32* %p, i64 %v) {
+define i64 @xchg_sext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw xchg i32* %p, i32 %t seq_cst
+  %old = atomicrmw xchg ptr %p, i32 %t seq_cst
   %e = sext i32 %old to i64
   ret i64 %e
 }
@@ -662,10 +662,10 @@ define i64 @xchg_sext_i32_i64(i32* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @cmpxchg_sext_i8_i32(i8* %p, i32 %exp, i32 %new) {
+define i32 @cmpxchg_sext_i8_i32(ptr %p, i32 %exp, i32 %new) {
   %exp_t = trunc i32 %exp to i8
   %new_t = trunc i32 %new to i8
-  %pair = cmpxchg i8* %p, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %e = sext i8 %old to i32
   ret i32 %e
@@ -676,10 +676,10 @@ define i32 @cmpxchg_sext_i8_i32(i8* %p, i32 %exp, i32 %new) {
 ; CHECK: i32.atomic.rmw16.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @cmpxchg_sext_i16_i32(i16* %p, i32 %exp, i32 %new) {
+define i32 @cmpxchg_sext_i16_i32(ptr %p, i32 %exp, i32 %new) {
   %exp_t = trunc i32 %exp to i16
   %new_t = trunc i32 %new to i16
-  %pair = cmpxchg i16* %p, i16 %exp_t, i16 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i16 %exp_t, i16 %new_t seq_cst seq_cst
   %old = extractvalue { i16, i1 } %pair, 0
   %e = sext i16 %old to i32
   ret i32 %e
@@ -690,10 +690,10 @@ define i32 @cmpxchg_sext_i16_i32(i16* %p, i32 %exp, i32 %new) {
 ; CHECK: i64.atomic.rmw8.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @cmpxchg_sext_i8_i64(i8* %p, i64 %exp, i64 %new) {
+define i64 @cmpxchg_sext_i8_i64(ptr %p, i64 %exp, i64 %new) {
   %exp_t = trunc i64 %exp to i8
   %new_t = trunc i64 %new to i8
-  %pair = cmpxchg i8* %p, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %e = sext i8 %old to i64
   ret i64 %e
@@ -704,10 +704,10 @@ define i64 @cmpxchg_sext_i8_i64(i8* %p, i64 %exp, i64 %new) {
 ; CHECK: i64.atomic.rmw16.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @cmpxchg_sext_i16_i64(i16* %p, i64 %exp, i64 %new) {
+define i64 @cmpxchg_sext_i16_i64(ptr %p, i64 %exp, i64 %new) {
   %exp_t = trunc i64 %exp to i16
   %new_t = trunc i64 %new to i16
-  %pair = cmpxchg i16* %p, i16 %exp_t, i16 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i16 %exp_t, i16 %new_t seq_cst seq_cst
   %old = extractvalue { i16, i1 } %pair, 0
   %e = sext i16 %old to i64
   ret i64 %e
@@ -721,10 +721,10 @@ define i64 @cmpxchg_sext_i16_i64(i16* %p, i64 %exp, i64 %new) {
 ; CHECK-NEXT: i32.atomic.rmw.cmpxchg $push2=, 0($0), $pop1, $pop0{{$}}
 ; CHECK-NEXT: i64.extend_i32_s $push3=, $pop2{{$}}
 ; CHECK-NEXT: return $pop3{{$}}
-define i64 @cmpxchg_sext_i32_i64(i32* %p, i64 %exp, i64 %new) {
+define i64 @cmpxchg_sext_i32_i64(ptr %p, i64 %exp, i64 %new) {
   %exp_t = trunc i64 %exp to i32
   %new_t = trunc i64 %new to i32
-  %pair = cmpxchg i32* %p, i32 %exp_t, i32 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i32 %exp_t, i32 %new_t seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   %e = sext i32 %old to i64
   ret i64 %e
@@ -740,9 +740,9 @@ define i64 @cmpxchg_sext_i32_i64(i32* %p, i64 %exp, i64 %new) {
 ; CHECK: loop
 ; CHECK: i32.atomic.rmw8.cmpxchg_u
 ; CHECK: i32.extend8_s
-define i32 @nand_sext_i8_i32(i8* %p, i32 %v) {
+define i32 @nand_sext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw nand i8* %p, i8 %t seq_cst
+  %old = atomicrmw nand ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i32
   ret i32 %e
 }
@@ -752,9 +752,9 @@ define i32 @nand_sext_i8_i32(i8* %p, i32 %v) {
 ; CHECK: loop
 ; CHECK: i32.atomic.rmw16.cmpxchg_u
 ; CHECK: i32.extend16_s
-define i32 @nand_sext_i16_i32(i16* %p, i32 %v) {
+define i32 @nand_sext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw nand i16* %p, i16 %t seq_cst
+  %old = atomicrmw nand ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i32
   ret i32 %e
 }
@@ -766,9 +766,9 @@ define i32 @nand_sext_i16_i32(i16* %p, i32 %v) {
 ; CHECK: i32.atomic.rmw8.cmpxchg_u
 ; CHECK: i64.extend_i32_u
 ; CHECK: i64.extend8_s
-define i64 @nand_sext_i8_i64(i8* %p, i64 %v) {
+define i64 @nand_sext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw nand i8* %p, i8 %t seq_cst
+  %old = atomicrmw nand ptr %p, i8 %t seq_cst
   %e = sext i8 %old to i64
   ret i64 %e
 }
@@ -780,9 +780,9 @@ define i64 @nand_sext_i8_i64(i8* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw16.cmpxchg_u
 ; CHECK: i64.extend_i32_u
 ; CHECK: i64.extend16_s
-define i64 @nand_sext_i16_i64(i16* %p, i64 %v) {
+define i64 @nand_sext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw nand i16* %p, i16 %t seq_cst
+  %old = atomicrmw nand ptr %p, i16 %t seq_cst
   %e = sext i16 %old to i64
   ret i64 %e
 }
@@ -793,9 +793,9 @@ define i64 @nand_sext_i16_i64(i16* %p, i64 %v) {
 ; CHECK: loop
 ; CHECK: i32.atomic.rmw.cmpxchg
 ; CHECK: i64.extend_i32_s
-define i64 @nand_sext_i32_i64(i32* %p, i64 %v) {
+define i64 @nand_sext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw nand i32* %p, i32 %t seq_cst
+  %old = atomicrmw nand ptr %p, i32 %t seq_cst
   %e = sext i32 %old to i64
   ret i64 %e
 }
@@ -810,9 +810,9 @@ define i64 @nand_sext_i32_i64(i32* %p, i64 %v) {
 ; CHECK-NEXT: .functype add_zext_i8_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw8.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @add_zext_i8_i32(i8* %p, i32 %v) {
+define i32 @add_zext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw add i8* %p, i8 %t seq_cst
+  %old = atomicrmw add ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i32
   ret i32 %e
 }
@@ -821,9 +821,9 @@ define i32 @add_zext_i8_i32(i8* %p, i32 %v) {
 ; CHECK-NEXT: .functype add_zext_i16_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw16.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @add_zext_i16_i32(i16* %p, i32 %v) {
+define i32 @add_zext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw add i16* %p, i16 %t seq_cst
+  %old = atomicrmw add ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i32
   ret i32 %e
 }
@@ -832,9 +832,9 @@ define i32 @add_zext_i16_i32(i16* %p, i32 %v) {
 ; CHECK-NEXT: .functype add_zext_i8_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw8.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @add_zext_i8_i64(i8* %p, i64 %v) {
+define i64 @add_zext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw add i8* %p, i8 %t seq_cst
+  %old = atomicrmw add ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i64
   ret i64 %e
 }
@@ -843,9 +843,9 @@ define i64 @add_zext_i8_i64(i8* %p, i64 %v) {
 ; CHECK-NEXT: .functype add_zext_i16_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw16.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @add_zext_i16_i64(i16* %p, i64 %v) {
+define i64 @add_zext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw add i16* %p, i16 %t seq_cst
+  %old = atomicrmw add ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i64
   ret i64 %e
 }
@@ -854,9 +854,9 @@ define i64 @add_zext_i16_i64(i16* %p, i64 %v) {
 ; CHECK-NEXT: .functype add_zext_i32_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw32.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @add_zext_i32_i64(i32* %p, i64 %v) {
+define i64 @add_zext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw add i32* %p, i32 %t seq_cst
+  %old = atomicrmw add ptr %p, i32 %t seq_cst
   %e = zext i32 %old to i64
   ret i64 %e
 }
@@ -867,9 +867,9 @@ define i64 @add_zext_i32_i64(i32* %p, i64 %v) {
 ; CHECK-NEXT: .functype sub_zext_i8_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw8.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @sub_zext_i8_i32(i8* %p, i32 %v) {
+define i32 @sub_zext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw sub i8* %p, i8 %t seq_cst
+  %old = atomicrmw sub ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i32
   ret i32 %e
 }
@@ -878,9 +878,9 @@ define i32 @sub_zext_i8_i32(i8* %p, i32 %v) {
 ; CHECK-NEXT: .functype sub_zext_i16_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw16.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @sub_zext_i16_i32(i16* %p, i32 %v) {
+define i32 @sub_zext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw sub i16* %p, i16 %t seq_cst
+  %old = atomicrmw sub ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i32
   ret i32 %e
 }
@@ -889,9 +889,9 @@ define i32 @sub_zext_i16_i32(i16* %p, i32 %v) {
 ; CHECK-NEXT: .functype sub_zext_i8_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw8.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @sub_zext_i8_i64(i8* %p, i64 %v) {
+define i64 @sub_zext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw sub i8* %p, i8 %t seq_cst
+  %old = atomicrmw sub ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i64
   ret i64 %e
 }
@@ -900,9 +900,9 @@ define i64 @sub_zext_i8_i64(i8* %p, i64 %v) {
 ; CHECK-NEXT: .functype sub_zext_i16_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw16.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @sub_zext_i16_i64(i16* %p, i64 %v) {
+define i64 @sub_zext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw sub i16* %p, i16 %t seq_cst
+  %old = atomicrmw sub ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i64
   ret i64 %e
 }
@@ -911,9 +911,9 @@ define i64 @sub_zext_i16_i64(i16* %p, i64 %v) {
 ; CHECK-NEXT: .functype sub_zext_i32_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw32.sub_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @sub_zext_i32_i64(i32* %p, i64 %v) {
+define i64 @sub_zext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw sub i32* %p, i32 %t seq_cst
+  %old = atomicrmw sub ptr %p, i32 %t seq_cst
   %e = zext i32 %old to i64
   ret i64 %e
 }
@@ -924,9 +924,9 @@ define i64 @sub_zext_i32_i64(i32* %p, i64 %v) {
 ; CHECK-NEXT: .functype and_zext_i8_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw8.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @and_zext_i8_i32(i8* %p, i32 %v) {
+define i32 @and_zext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw and i8* %p, i8 %t seq_cst
+  %old = atomicrmw and ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i32
   ret i32 %e
 }
@@ -935,9 +935,9 @@ define i32 @and_zext_i8_i32(i8* %p, i32 %v) {
 ; CHECK-NEXT: .functype and_zext_i16_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw16.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @and_zext_i16_i32(i16* %p, i32 %v) {
+define i32 @and_zext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw and i16* %p, i16 %t seq_cst
+  %old = atomicrmw and ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i32
   ret i32 %e
 }
@@ -946,9 +946,9 @@ define i32 @and_zext_i16_i32(i16* %p, i32 %v) {
 ; CHECK-NEXT: .functype and_zext_i8_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw8.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @and_zext_i8_i64(i8* %p, i64 %v) {
+define i64 @and_zext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw and i8* %p, i8 %t seq_cst
+  %old = atomicrmw and ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i64
   ret i64 %e
 }
@@ -957,9 +957,9 @@ define i64 @and_zext_i8_i64(i8* %p, i64 %v) {
 ; CHECK-NEXT: .functype and_zext_i16_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw16.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @and_zext_i16_i64(i16* %p, i64 %v) {
+define i64 @and_zext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw and i16* %p, i16 %t seq_cst
+  %old = atomicrmw and ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i64
   ret i64 %e
 }
@@ -968,9 +968,9 @@ define i64 @and_zext_i16_i64(i16* %p, i64 %v) {
 ; CHECK-NEXT: .functype and_zext_i32_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw32.and_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @and_zext_i32_i64(i32* %p, i64 %v) {
+define i64 @and_zext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw and i32* %p, i32 %t seq_cst
+  %old = atomicrmw and ptr %p, i32 %t seq_cst
   %e = zext i32 %old to i64
   ret i64 %e
 }
@@ -981,9 +981,9 @@ define i64 @and_zext_i32_i64(i32* %p, i64 %v) {
 ; CHECK-NEXT: .functype or_zext_i8_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw8.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @or_zext_i8_i32(i8* %p, i32 %v) {
+define i32 @or_zext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw or i8* %p, i8 %t seq_cst
+  %old = atomicrmw or ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i32
   ret i32 %e
 }
@@ -992,9 +992,9 @@ define i32 @or_zext_i8_i32(i8* %p, i32 %v) {
 ; CHECK-NEXT: .functype or_zext_i16_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw16.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @or_zext_i16_i32(i16* %p, i32 %v) {
+define i32 @or_zext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw or i16* %p, i16 %t seq_cst
+  %old = atomicrmw or ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i32
   ret i32 %e
 }
@@ -1003,9 +1003,9 @@ define i32 @or_zext_i16_i32(i16* %p, i32 %v) {
 ; CHECK-NEXT: .functype or_zext_i8_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw8.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @or_zext_i8_i64(i8* %p, i64 %v) {
+define i64 @or_zext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw or i8* %p, i8 %t seq_cst
+  %old = atomicrmw or ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i64
   ret i64 %e
 }
@@ -1014,9 +1014,9 @@ define i64 @or_zext_i8_i64(i8* %p, i64 %v) {
 ; CHECK-NEXT: .functype or_zext_i16_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw16.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @or_zext_i16_i64(i16* %p, i64 %v) {
+define i64 @or_zext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw or i16* %p, i16 %t seq_cst
+  %old = atomicrmw or ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i64
   ret i64 %e
 }
@@ -1025,9 +1025,9 @@ define i64 @or_zext_i16_i64(i16* %p, i64 %v) {
 ; CHECK-NEXT: .functype or_zext_i32_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw32.or_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @or_zext_i32_i64(i32* %p, i64 %v) {
+define i64 @or_zext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw or i32* %p, i32 %t seq_cst
+  %old = atomicrmw or ptr %p, i32 %t seq_cst
   %e = zext i32 %old to i64
   ret i64 %e
 }
@@ -1038,9 +1038,9 @@ define i64 @or_zext_i32_i64(i32* %p, i64 %v) {
 ; CHECK-NEXT: .functype xor_zext_i8_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw8.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @xor_zext_i8_i32(i8* %p, i32 %v) {
+define i32 @xor_zext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw xor i8* %p, i8 %t seq_cst
+  %old = atomicrmw xor ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i32
   ret i32 %e
 }
@@ -1049,9 +1049,9 @@ define i32 @xor_zext_i8_i32(i8* %p, i32 %v) {
 ; CHECK-NEXT: .functype xor_zext_i16_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw16.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @xor_zext_i16_i32(i16* %p, i32 %v) {
+define i32 @xor_zext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw xor i16* %p, i16 %t seq_cst
+  %old = atomicrmw xor ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i32
   ret i32 %e
 }
@@ -1060,9 +1060,9 @@ define i32 @xor_zext_i16_i32(i16* %p, i32 %v) {
 ; CHECK-NEXT: .functype xor_zext_i8_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw8.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @xor_zext_i8_i64(i8* %p, i64 %v) {
+define i64 @xor_zext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw xor i8* %p, i8 %t seq_cst
+  %old = atomicrmw xor ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i64
   ret i64 %e
 }
@@ -1071,9 +1071,9 @@ define i64 @xor_zext_i8_i64(i8* %p, i64 %v) {
 ; CHECK-NEXT: .functype xor_zext_i16_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw16.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @xor_zext_i16_i64(i16* %p, i64 %v) {
+define i64 @xor_zext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw xor i16* %p, i16 %t seq_cst
+  %old = atomicrmw xor ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i64
   ret i64 %e
 }
@@ -1082,9 +1082,9 @@ define i64 @xor_zext_i16_i64(i16* %p, i64 %v) {
 ; CHECK-NEXT: .functype xor_zext_i32_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw32.xor_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @xor_zext_i32_i64(i32* %p, i64 %v) {
+define i64 @xor_zext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw xor i32* %p, i32 %t seq_cst
+  %old = atomicrmw xor ptr %p, i32 %t seq_cst
   %e = zext i32 %old to i64
   ret i64 %e
 }
@@ -1095,9 +1095,9 @@ define i64 @xor_zext_i32_i64(i32* %p, i64 %v) {
 ; CHECK-NEXT: .functype xchg_zext_i8_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw8.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @xchg_zext_i8_i32(i8* %p, i32 %v) {
+define i32 @xchg_zext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw xchg i8* %p, i8 %t seq_cst
+  %old = atomicrmw xchg ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i32
   ret i32 %e
 }
@@ -1106,9 +1106,9 @@ define i32 @xchg_zext_i8_i32(i8* %p, i32 %v) {
 ; CHECK-NEXT: .functype xchg_zext_i16_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw16.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @xchg_zext_i16_i32(i16* %p, i32 %v) {
+define i32 @xchg_zext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw xchg i16* %p, i16 %t seq_cst
+  %old = atomicrmw xchg ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i32
   ret i32 %e
 }
@@ -1117,9 +1117,9 @@ define i32 @xchg_zext_i16_i32(i16* %p, i32 %v) {
 ; CHECK-NEXT: .functype xchg_zext_i8_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw8.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @xchg_zext_i8_i64(i8* %p, i64 %v) {
+define i64 @xchg_zext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw xchg i8* %p, i8 %t seq_cst
+  %old = atomicrmw xchg ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i64
   ret i64 %e
 }
@@ -1128,9 +1128,9 @@ define i64 @xchg_zext_i8_i64(i8* %p, i64 %v) {
 ; CHECK-NEXT: .functype xchg_zext_i16_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw16.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @xchg_zext_i16_i64(i16* %p, i64 %v) {
+define i64 @xchg_zext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw xchg i16* %p, i16 %t seq_cst
+  %old = atomicrmw xchg ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i64
   ret i64 %e
 }
@@ -1139,9 +1139,9 @@ define i64 @xchg_zext_i16_i64(i16* %p, i64 %v) {
 ; CHECK-NEXT: .functype xchg_zext_i32_i64 (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw32.xchg_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @xchg_zext_i32_i64(i32* %p, i64 %v) {
+define i64 @xchg_zext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw xchg i32* %p, i32 %t seq_cst
+  %old = atomicrmw xchg ptr %p, i32 %t seq_cst
   %e = zext i32 %old to i64
   ret i64 %e
 }
@@ -1152,10 +1152,10 @@ define i64 @xchg_zext_i32_i64(i32* %p, i64 %v) {
 ; CHECK-NEXT: .functype cmpxchg_zext_i8_i32 (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_zext_i8_i32(i8* %p, i32 %exp, i32 %new) {
+define i32 @cmpxchg_zext_i8_i32(ptr %p, i32 %exp, i32 %new) {
   %exp_t = trunc i32 %exp to i8
   %new_t = trunc i32 %new to i8
-  %pair = cmpxchg i8* %p, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %e = zext i8 %old to i32
   ret i32 %e
@@ -1165,10 +1165,10 @@ define i32 @cmpxchg_zext_i8_i32(i8* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_zext_i16_i32 (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw16.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_zext_i16_i32(i16* %p, i32 %exp, i32 %new) {
+define i32 @cmpxchg_zext_i16_i32(ptr %p, i32 %exp, i32 %new) {
   %exp_t = trunc i32 %exp to i16
   %new_t = trunc i32 %new to i16
-  %pair = cmpxchg i16* %p, i16 %exp_t, i16 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i16 %exp_t, i16 %new_t seq_cst seq_cst
   %old = extractvalue { i16, i1 } %pair, 0
   %e = zext i16 %old to i32
   ret i32 %e
@@ -1178,10 +1178,10 @@ define i32 @cmpxchg_zext_i16_i32(i16* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_zext_i8_i64 (i32, i64, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw8.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @cmpxchg_zext_i8_i64(i8* %p, i64 %exp, i64 %new) {
+define i64 @cmpxchg_zext_i8_i64(ptr %p, i64 %exp, i64 %new) {
   %exp_t = trunc i64 %exp to i8
   %new_t = trunc i64 %new to i8
-  %pair = cmpxchg i8* %p, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %e = zext i8 %old to i64
   ret i64 %e
@@ -1191,10 +1191,10 @@ define i64 @cmpxchg_zext_i8_i64(i8* %p, i64 %exp, i64 %new) {
 ; CHECK-NEXT: .functype cmpxchg_zext_i16_i64 (i32, i64, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw16.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @cmpxchg_zext_i16_i64(i16* %p, i64 %exp, i64 %new) {
+define i64 @cmpxchg_zext_i16_i64(ptr %p, i64 %exp, i64 %new) {
   %exp_t = trunc i64 %exp to i16
   %new_t = trunc i64 %new to i16
-  %pair = cmpxchg i16* %p, i16 %exp_t, i16 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i16 %exp_t, i16 %new_t seq_cst seq_cst
   %old = extractvalue { i16, i1 } %pair, 0
   %e = zext i16 %old to i64
   ret i64 %e
@@ -1204,10 +1204,10 @@ define i64 @cmpxchg_zext_i16_i64(i16* %p, i64 %exp, i64 %new) {
 ; CHECK-NEXT: .functype cmpxchg_zext_i32_i64 (i32, i64, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw32.cmpxchg_u $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @cmpxchg_zext_i32_i64(i32* %p, i64 %exp, i64 %new) {
+define i64 @cmpxchg_zext_i32_i64(ptr %p, i64 %exp, i64 %new) {
   %exp_t = trunc i64 %exp to i32
   %new_t = trunc i64 %new to i32
-  %pair = cmpxchg i32* %p, i32 %exp_t, i32 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i32 %exp_t, i32 %new_t seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   %e = zext i32 %old to i64
   ret i64 %e
@@ -1222,9 +1222,9 @@ define i64 @cmpxchg_zext_i32_i64(i32* %p, i64 %exp, i64 %new) {
 ; CHECK-NEXT: .functype nand_zext_i8_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: loop
 ; CHECK: i32.atomic.rmw8.cmpxchg_u
-define i32 @nand_zext_i8_i32(i8* %p, i32 %v) {
+define i32 @nand_zext_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw nand i8* %p, i8 %t seq_cst
+  %old = atomicrmw nand ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i32
   ret i32 %e
 }
@@ -1233,9 +1233,9 @@ define i32 @nand_zext_i8_i32(i8* %p, i32 %v) {
 ; CHECK-NEXT: .functype nand_zext_i16_i32 (i32, i32) -> (i32){{$}}
 ; CHECK: loop
 ; CHECK: i32.atomic.rmw16.cmpxchg_u
-define i32 @nand_zext_i16_i32(i16* %p, i32 %v) {
+define i32 @nand_zext_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  %old = atomicrmw nand i16* %p, i16 %t seq_cst
+  %old = atomicrmw nand ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i32
   ret i32 %e
 }
@@ -1246,9 +1246,9 @@ define i32 @nand_zext_i16_i32(i16* %p, i32 %v) {
 ; CHECK: loop
 ; CHECK: i32.atomic.rmw8.cmpxchg_u
 ; CHECK: i64.extend_i32_u
-define i64 @nand_zext_i8_i64(i8* %p, i64 %v) {
+define i64 @nand_zext_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  %old = atomicrmw nand i8* %p, i8 %t seq_cst
+  %old = atomicrmw nand ptr %p, i8 %t seq_cst
   %e = zext i8 %old to i64
   ret i64 %e
 }
@@ -1259,9 +1259,9 @@ define i64 @nand_zext_i8_i64(i8* %p, i64 %v) {
 ; CHECK: loop
 ; CHECK: i32.atomic.rmw16.cmpxchg_u
 ; CHECK: i64.extend_i32_u
-define i64 @nand_zext_i16_i64(i16* %p, i64 %v) {
+define i64 @nand_zext_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  %old = atomicrmw nand i16* %p, i16 %t seq_cst
+  %old = atomicrmw nand ptr %p, i16 %t seq_cst
   %e = zext i16 %old to i64
   ret i64 %e
 }
@@ -1272,9 +1272,9 @@ define i64 @nand_zext_i16_i64(i16* %p, i64 %v) {
 ; CHECK: loop
 ; CHECK: i32.atomic.rmw.cmpxchg
 ; CHECK: i64.extend_i32_u
-define i64 @nand_zext_i32_i64(i32* %p, i64 %v) {
+define i64 @nand_zext_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  %old = atomicrmw nand i32* %p, i32 %t seq_cst
+  %old = atomicrmw nand ptr %p, i32 %t seq_cst
   %e = zext i32 %old to i64
   ret i64 %e
 }

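The atomic-rmw.ll changes above are purely mechanical: every typed pointer (i32*, i64*, i8*, i16*) becomes the opaque ptr, and nothing else needs to change because atomicrmw and cmpxchg already spell out the value type as an explicit operand, so the access width never depended on the pointee. A minimal before/after sketch (the function name here is illustrative, not taken from the test):

; Typed-pointer form (pre-conversion):
define i32 @rmw_example(i32* %p, i32 %v) {
  %old = atomicrmw add i32* %p, i32 %v seq_cst
  ret i32 %old
}

; Opaque-pointer form (post-conversion): the pointee type is gone from
; the pointer, but the i32 value operand still fixes the operation
; width, so the selected i32.atomic.rmw.add instruction is unchanged.
define i32 @rmw_example(ptr %p, i32 %v) {
  %old = atomicrmw add ptr %p, i32 %v seq_cst
  ret i32 %old
}
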
diff --git a/llvm/test/CodeGen/WebAssembly/bulk-memory.ll b/llvm/test/CodeGen/WebAssembly/bulk-memory.ll
index 4ccc95c8f492..dc29dc81c13e 100644
--- a/llvm/test/CodeGen/WebAssembly/bulk-memory.ll
+++ b/llvm/test/CodeGen/WebAssembly/bulk-memory.ll
@@ -5,25 +5,22 @@
 
 target triple = "wasm32-unknown-unknown"
 
-declare void @llvm.memcpy.p0i8.p0i8.i8(i8*, i8*, i8, i1)
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)
-declare void @llvm.memcpy.p0i32.p0i32.i32(i32*, i32*, i32, i1)
+declare void @llvm.memcpy.p0.p0.i8(ptr, ptr, i8, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
 
-declare void @llvm.memmove.p0i8.p0i8.i8(i8*, i8*, i8, i1)
-declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i1)
-declare void @llvm.memmove.p0i32.p0i32.i32(i32*, i32*, i32, i1)
+declare void @llvm.memmove.p0.p0.i8(ptr, ptr, i8, i1)
+declare void @llvm.memmove.p0.p0.i32(ptr, ptr, i32, i1)
 
-declare void @llvm.memset.p0i8.i8(i8*, i8, i8, i1)
-declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1)
-declare void @llvm.memset.p0i32.i32(i32*, i8, i32, i1)
+declare void @llvm.memset.p0.i8(ptr, i8, i8, i1)
+declare void @llvm.memset.p0.i32(ptr, i8, i32, i1)
 
 ; CHECK-LABEL: memcpy_i8:
 ; NO-BULK-MEM-NOT: memory.copy
 ; BULK-MEM-NEXT: .functype memcpy_i8 (i32, i32, i32) -> ()
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $2
 ; BULK-MEM-NEXT: return
-define void @memcpy_i8(i8* %dest, i8* %src, i8 zeroext %len) {
-  call void @llvm.memcpy.p0i8.p0i8.i8(i8* %dest, i8* %src, i8 %len, i1 0)
+define void @memcpy_i8(ptr %dest, ptr %src, i8 zeroext %len) {
+  call void @llvm.memcpy.p0.p0.i8(ptr %dest, ptr %src, i8 %len, i1 0)
   ret void
 }
 
@@ -32,8 +29,8 @@ define void @memcpy_i8(i8* %dest, i8* %src, i8 zeroext %len) {
 ; BULK-MEM-NEXT: .functype memmove_i8 (i32, i32, i32) -> ()
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $2
 ; BULK-MEM-NEXT: return
-define void @memmove_i8(i8* %dest, i8* %src, i8 zeroext %len) {
-  call void @llvm.memmove.p0i8.p0i8.i8(i8* %dest, i8* %src, i8 %len, i1 0)
+define void @memmove_i8(ptr %dest, ptr %src, i8 zeroext %len) {
+  call void @llvm.memmove.p0.p0.i8(ptr %dest, ptr %src, i8 %len, i1 0)
   ret void
 }
 
@@ -42,8 +39,8 @@ define void @memmove_i8(i8* %dest, i8* %src, i8 zeroext %len) {
 ; BULK-MEM-NEXT: .functype memset_i8 (i32, i32, i32) -> ()
 ; BULK-MEM-NEXT: memory.fill 0, $0, $1, $2
 ; BULK-MEM-NEXT: return
-define void @memset_i8(i8* %dest, i8 %val, i8 zeroext %len) {
-  call void @llvm.memset.p0i8.i8(i8* %dest, i8 %val, i8 %len, i1 0)
+define void @memset_i8(ptr %dest, i8 %val, i8 zeroext %len) {
+  call void @llvm.memset.p0.i8(ptr %dest, i8 %val, i8 %len, i1 0)
   ret void
 }
 
@@ -52,8 +49,8 @@ define void @memset_i8(i8* %dest, i8 %val, i8 zeroext %len) {
 ; BULK-MEM-NEXT: .functype memcpy_i32 (i32, i32, i32) -> ()
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $2
 ; BULK-MEM-NEXT: return
-define void @memcpy_i32(i32* %dest, i32* %src, i32 %len) {
-  call void @llvm.memcpy.p0i32.p0i32.i32(i32* %dest, i32* %src, i32 %len, i1 0)
+define void @memcpy_i32(ptr %dest, ptr %src, i32 %len) {
+  call void @llvm.memcpy.p0.p0.i32(ptr %dest, ptr %src, i32 %len, i1 0)
   ret void
 }
 
@@ -62,8 +59,8 @@ define void @memcpy_i32(i32* %dest, i32* %src, i32 %len) {
 ; BULK-MEM-NEXT: .functype memmove_i32 (i32, i32, i32) -> ()
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $2
 ; BULK-MEM-NEXT: return
-define void @memmove_i32(i32* %dest, i32* %src, i32 %len) {
-  call void @llvm.memmove.p0i32.p0i32.i32(i32* %dest, i32* %src, i32 %len, i1 0)
+define void @memmove_i32(ptr %dest, ptr %src, i32 %len) {
+  call void @llvm.memmove.p0.p0.i32(ptr %dest, ptr %src, i32 %len, i1 0)
   ret void
 }
 
@@ -72,8 +69,8 @@ define void @memmove_i32(i32* %dest, i32* %src, i32 %len) {
 ; BULK-MEM-NEXT: .functype memset_i32 (i32, i32, i32) -> ()
 ; BULK-MEM-NEXT: memory.fill 0, $0, $1, $2
 ; BULK-MEM-NEXT: return
-define void @memset_i32(i32* %dest, i8 %val, i32 %len) {
-  call void @llvm.memset.p0i32.i32(i32* %dest, i8 %val, i32 %len, i1 0)
+define void @memset_i32(ptr %dest, i8 %val, i32 %len) {
+  call void @llvm.memset.p0.i32(ptr %dest, i8 %val, i32 %len, i1 0)
   ret void
 }
 
@@ -82,8 +79,8 @@ define void @memset_i32(i32* %dest, i8 %val, i32 %len) {
 ; CHECK-NEXT: i32.load8_u $push[[L0:[0-9]+]]=, 0($1)
 ; CHECK-NEXT: i32.store8 0($0), $pop[[L0]]
 ; CHECK-NEXT: return
-define void @memcpy_1(i8* %dest, i8* %src) {
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 1, i1 0)
+define void @memcpy_1(ptr %dest, ptr %src) {
+  call void @llvm.memcpy.p0.p0.i32(ptr %dest, ptr %src, i32 1, i1 0)
   ret void
 }
 
@@ -92,8 +89,8 @@ define void @memcpy_1(i8* %dest, i8* %src) {
 ; CHECK-NEXT: i32.load8_u $push[[L0:[0-9]+]]=, 0($1)
 ; CHECK-NEXT: i32.store8 0($0), $pop[[L0]]
 ; CHECK-NEXT: return
-define void @memmove_1(i8* %dest, i8* %src) {
-  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 1, i1 0)
+define void @memmove_1(ptr %dest, ptr %src) {
+  call void @llvm.memmove.p0.p0.i32(ptr %dest, ptr %src, i32 1, i1 0)
   ret void
 }
 
@@ -102,8 +99,8 @@ define void @memmove_1(i8* %dest, i8* %src) {
 ; BULK-MEM-NEXT: .functype memset_1 (i32, i32) -> ()
 ; BULK-MEM-NEXT: i32.store8 0($0), $1
 ; BULK-MEM-NEXT: return
-define void @memset_1(i8* %dest, i8 %val) {
-  call void @llvm.memset.p0i8.i32(i8* %dest, i8 %val, i32 1, i1 0)
+define void @memset_1(ptr %dest, i8 %val) {
+  call void @llvm.memset.p0.i32(ptr %dest, i8 %val, i32 1, i1 0)
   ret void
 }
 
@@ -113,8 +110,8 @@ define void @memset_1(i8* %dest, i8 %val) {
 ; BULK-MEM-NEXT: i32.const $push[[L0:[0-9]+]]=, 1024
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]]
 ; BULK-MEM-NEXT: return
-define void @memcpy_1024(i8* %dest, i8* %src) {
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 1024, i1 0)
+define void @memcpy_1024(ptr %dest, ptr %src) {
+  call void @llvm.memcpy.p0.p0.i32(ptr %dest, ptr %src, i32 1024, i1 0)
   ret void
 }
 
@@ -124,8 +121,8 @@ define void @memcpy_1024(i8* %dest, i8* %src) {
 ; BULK-MEM-NEXT: i32.const $push[[L0:[0-9]+]]=, 1024
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]]
 ; BULK-MEM-NEXT: return
-define void @memmove_1024(i8* %dest, i8* %src) {
-  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 1024, i1 0)
+define void @memmove_1024(ptr %dest, ptr %src) {
+  call void @llvm.memmove.p0.p0.i32(ptr %dest, ptr %src, i32 1024, i1 0)
   ret void
 }
 
@@ -135,8 +132,8 @@ define void @memmove_1024(i8* %dest, i8* %src) {
 ; BULK-MEM-NEXT: i32.const $push[[L0:[0-9]+]]=, 1024
 ; BULK-MEM-NEXT: memory.fill 0, $0, $1, $pop[[L0]]
 ; BULK-MEM-NEXT: return
-define void @memset_1024(i8* %dest, i8 %val) {
-  call void @llvm.memset.p0i8.i32(i8* %dest, i8 %val, i32 1024, i1 0)
+define void @memset_1024(ptr %dest, i8 %val) {
+  call void @llvm.memset.p0.i32(ptr %dest, i8 %val, i32 1024, i1 0)
   ret void
 }
 
@@ -162,10 +159,9 @@ define void @memset_1024(i8* %dest, i8 %val) {
 ; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 100
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L4]], $pop[[L5]]
 ; BULK-MEM-NEXT: return
-define void @memcpy_alloca_src(i8* %dst) {
+define void @memcpy_alloca_src(ptr %dst) {
   %a = alloca [100 x i8]
-  %p = bitcast [100 x i8]* %a to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %p, i32 100, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %a, i32 100, i1 false)
   ret void
 }
 
@@ -180,10 +176,9 @@ define void @memcpy_alloca_src(i8* %dst) {
 ; BULK-MEM-NEXT: i32.const $push[[L5:[0-9]+]]=, 100
 ; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L4]], $0, $pop[[L5]]
 ; BULK-MEM-NEXT: return
-define void @memcpy_alloca_dst(i8* %src) {
+define void @memcpy_alloca_dst(ptr %src) {
   %a = alloca [100 x i8]
-  %p = bitcast [100 x i8]* %a to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %src, i32 100, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr %src, i32 100, i1 false)
   ret void
 }
 
@@ -200,7 +195,6 @@ define void @memcpy_alloca_dst(i8* %src) {
 ; BULK-MEM-NEXT: return
 define void @memset_alloca(i8 %val) {
   %a = alloca [100 x i8]
-  %p = bitcast [100 x i8]* %a to i8*
-  call void @llvm.memset.p0i8.i32(i8* %p, i8 %val, i32 100, i1 false)
+  call void @llvm.memset.p0.i32(ptr %a, i8 %val, i32 100, i1 false)
   ret void
 }

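Two things fall out of the bulk-memory.ll conversion above. First, intrinsic name mangling encodes only the parameter types, so once i8* and i32* both print as ptr, @llvm.memcpy.p0i8.p0i8.i32 and @llvm.memcpy.p0i32.p0i32.i32 denote the same intrinsic, @llvm.memcpy.p0.p0.i32, and each three-declaration family collapses to two. Second, casting an alloca to i8* becomes a no-op, so the bitcast is dropped. Side by side, from the @memcpy_alloca_src hunk:

; Typed pointers: the [100 x i8]* alloca result had to be cast to i8*
; to match the intrinsic signature:
  %a = alloca [100 x i8]
  %p = bitcast [100 x i8]* %a to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %p, i32 100, i1 false)

; Opaque pointers: an alloca already yields a ptr, so the cast (and the
; pointee-specific mangling) disappears:
  %a = alloca [100 x i8]
  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %a, i32 100, i1 false)
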
diff --git a/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll b/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll
index 88cf6b58c073..8ee5f6314381 100644
--- a/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll
+++ b/llvm/test/CodeGen/WebAssembly/bulk-memory64.ll
@@ -5,17 +5,14 @@
 
 target triple = "wasm64-unknown-unknown"
 
-declare void @llvm.memcpy.p0i8.p0i8.i8(i8*, i8*, i8, i1)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
-declare void @llvm.memcpy.p0i32.p0i32.i64(i32*, i32*, i64, i1)
+declare void @llvm.memcpy.p0.p0.i8(ptr, ptr, i8, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
 
-declare void @llvm.memmove.p0i8.p0i8.i8(i8*, i8*, i8, i1)
-declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i1)
-declare void @llvm.memmove.p0i32.p0i32.i64(i32*, i32*, i64, i1)
+declare void @llvm.memmove.p0.p0.i8(ptr, ptr, i8, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr, ptr, i64, i1)
 
-declare void @llvm.memset.p0i8.i8(i8*, i8, i8, i1)
-declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i1)
-declare void @llvm.memset.p0i32.i64(i32*, i8, i64, i1)
+declare void @llvm.memset.p0.i8(ptr, i8, i8, i1)
+declare void @llvm.memset.p0.i64(ptr, i8, i64, i1)
 
 ; CHECK-LABEL: memcpy_i8:
 ; NO-BULK-MEM-NOT: memory.copy
@@ -23,8 +20,8 @@ declare void @llvm.memset.p0i32.i64(i32*, i8, i64, i1)
 ; BULK-MEM-NEXT: i64.extend_i32_u $push0=, $2
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop0
 ; BULK-MEM-NEXT: return
-define void @memcpy_i8(i8* %dest, i8* %src, i8 zeroext %len) {
-  call void @llvm.memcpy.p0i8.p0i8.i8(i8* %dest, i8* %src, i8 %len, i1 0)
+define void @memcpy_i8(ptr %dest, ptr %src, i8 zeroext %len) {
+  call void @llvm.memcpy.p0.p0.i8(ptr %dest, ptr %src, i8 %len, i1 0)
   ret void
 }
 
@@ -34,8 +31,8 @@ define void @memcpy_i8(i8* %dest, i8* %src, i8 zeroext %len) {
 ; BULK-MEM-NEXT: i64.extend_i32_u $push0=, $2
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop0
 ; BULK-MEM-NEXT: return
-define void @memmove_i8(i8* %dest, i8* %src, i8 zeroext %len) {
-  call void @llvm.memmove.p0i8.p0i8.i8(i8* %dest, i8* %src, i8 %len, i1 0)
+define void @memmove_i8(ptr %dest, ptr %src, i8 zeroext %len) {
+  call void @llvm.memmove.p0.p0.i8(ptr %dest, ptr %src, i8 %len, i1 0)
   ret void
 }
 
@@ -45,8 +42,8 @@ define void @memmove_i8(i8* %dest, i8* %src, i8 zeroext %len) {
 ; BULK-MEM-NEXT: i64.extend_i32_u $push0=, $2
 ; BULK-MEM-NEXT: memory.fill 0, $0, $1, $pop0
 ; BULK-MEM-NEXT: return
-define void @memset_i8(i8* %dest, i8 %val, i8 zeroext %len) {
-  call void @llvm.memset.p0i8.i8(i8* %dest, i8 %val, i8 %len, i1 0)
+define void @memset_i8(ptr %dest, i8 %val, i8 zeroext %len) {
+  call void @llvm.memset.p0.i8(ptr %dest, i8 %val, i8 %len, i1 0)
   ret void
 }
 
@@ -55,8 +52,8 @@ define void @memset_i8(i8* %dest, i8 %val, i8 zeroext %len) {
 ; BULK-MEM-NEXT: .functype memcpy_i32 (i64, i64, i64) -> ()
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $2
 ; BULK-MEM-NEXT: return
-define void @memcpy_i32(i32* %dest, i32* %src, i64 %len) {
-  call void @llvm.memcpy.p0i32.p0i32.i64(i32* %dest, i32* %src, i64 %len, i1 0)
+define void @memcpy_i32(ptr %dest, ptr %src, i64 %len) {
+  call void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 %len, i1 0)
   ret void
 }
 
@@ -65,8 +62,8 @@ define void @memcpy_i32(i32* %dest, i32* %src, i64 %len) {
 ; BULK-MEM-NEXT: .functype memmove_i32 (i64, i64, i64) -> ()
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $2
 ; BULK-MEM-NEXT: return
-define void @memmove_i32(i32* %dest, i32* %src, i64 %len) {
-  call void @llvm.memmove.p0i32.p0i32.i64(i32* %dest, i32* %src, i64 %len, i1 0)
+define void @memmove_i32(ptr %dest, ptr %src, i64 %len) {
+  call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 %len, i1 0)
   ret void
 }
 
@@ -75,8 +72,8 @@ define void @memmove_i32(i32* %dest, i32* %src, i64 %len) {
 ; BULK-MEM-NEXT: .functype memset_i32 (i64, i32, i64) -> ()
 ; BULK-MEM-NEXT: memory.fill 0, $0, $1, $2
 ; BULK-MEM-NEXT: return
-define void @memset_i32(i32* %dest, i8 %val, i64 %len) {
-  call void @llvm.memset.p0i32.i64(i32* %dest, i8 %val, i64 %len, i1 0)
+define void @memset_i32(ptr %dest, i8 %val, i64 %len) {
+  call void @llvm.memset.p0.i64(ptr %dest, i8 %val, i64 %len, i1 0)
   ret void
 }
 
@@ -85,8 +82,8 @@ define void @memset_i32(i32* %dest, i8 %val, i64 %len) {
 ; CHECK-NEXT: i32.load8_u $push[[L0:[0-9]+]]=, 0($1)
 ; CHECK-NEXT: i32.store8 0($0), $pop[[L0]]
 ; CHECK-NEXT: return
-define void @memcpy_1(i8* %dest, i8* %src) {
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1, i1 0)
+define void @memcpy_1(ptr %dest, ptr %src) {
+  call void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 1, i1 0)
   ret void
 }
 
@@ -95,8 +92,8 @@ define void @memcpy_1(i8* %dest, i8* %src) {
 ; CHECK-NEXT: i32.load8_u $push[[L0:[0-9]+]]=, 0($1)
 ; CHECK-NEXT: i32.store8 0($0), $pop[[L0]]
 ; CHECK-NEXT: return
-define void @memmove_1(i8* %dest, i8* %src) {
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1, i1 0)
+define void @memmove_1(ptr %dest, ptr %src) {
+  call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 1, i1 0)
   ret void
 }
 
@@ -105,8 +102,8 @@ define void @memmove_1(i8* %dest, i8* %src) {
 ; BULK-MEM-NEXT: .functype memset_1 (i64, i32) -> ()
 ; BULK-MEM-NEXT: i32.store8 0($0), $1
 ; BULK-MEM-NEXT: return
-define void @memset_1(i8* %dest, i8 %val) {
-  call void @llvm.memset.p0i8.i64(i8* %dest, i8 %val, i64 1, i1 0)
+define void @memset_1(ptr %dest, i8 %val) {
+  call void @llvm.memset.p0.i64(ptr %dest, i8 %val, i64 1, i1 0)
   ret void
 }
 
@@ -116,8 +113,8 @@ define void @memset_1(i8* %dest, i8 %val) {
 ; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 1024
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]]
 ; BULK-MEM-NEXT: return
-define void @memcpy_1024(i8* %dest, i8* %src) {
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1024, i1 0)
+define void @memcpy_1024(ptr %dest, ptr %src) {
+  call void @llvm.memcpy.p0.p0.i64(ptr %dest, ptr %src, i64 1024, i1 0)
   ret void
 }
 
@@ -127,8 +124,8 @@ define void @memcpy_1024(i8* %dest, i8* %src) {
 ; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 1024
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $1, $pop[[L0]]
 ; BULK-MEM-NEXT: return
-define void @memmove_1024(i8* %dest, i8* %src) {
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dest, i8* %src, i64 1024, i1 0)
+define void @memmove_1024(ptr %dest, ptr %src) {
+  call void @llvm.memmove.p0.p0.i64(ptr %dest, ptr %src, i64 1024, i1 0)
   ret void
 }
 
@@ -138,8 +135,8 @@ define void @memmove_1024(i8* %dest, i8* %src) {
 ; BULK-MEM-NEXT: i64.const $push[[L0:[0-9]+]]=, 1024
 ; BULK-MEM-NEXT: memory.fill 0, $0, $1, $pop[[L0]]
 ; BULK-MEM-NEXT: return
-define void @memset_1024(i8* %dest, i8 %val) {
-  call void @llvm.memset.p0i8.i64(i8* %dest, i8 %val, i64 1024, i1 0)
+define void @memset_1024(ptr %dest, i8 %val) {
+  call void @llvm.memset.p0.i64(ptr %dest, i8 %val, i64 1024, i1 0)
   ret void
 }
 
@@ -165,10 +162,9 @@ define void @memset_1024(i8* %dest, i8 %val) {
 ; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 100
 ; BULK-MEM-NEXT: memory.copy 0, 0, $0, $pop[[L4]], $pop[[L5]]
 ; BULK-MEM-NEXT: return
-define void @memcpy_alloca_src(i8* %dst) {
+define void @memcpy_alloca_src(ptr %dst) {
   %a = alloca [100 x i8]
-  %p = bitcast [100 x i8]* %a to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %p, i64 100, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %a, i64 100, i1 false)
   ret void
 }
 
@@ -183,10 +179,9 @@ define void @memcpy_alloca_src(i8* %dst) {
 ; BULK-MEM-NEXT: i64.const $push[[L5:[0-9]+]]=, 100
 ; BULK-MEM-NEXT: memory.copy 0, 0, $pop[[L4]], $0, $pop[[L5]]
 ; BULK-MEM-NEXT: return
-define void @memcpy_alloca_dst(i8* %src) {
+define void @memcpy_alloca_dst(ptr %src) {
   %a = alloca [100 x i8]
-  %p = bitcast [100 x i8]* %a to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p, i8* %src, i64 100, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr %src, i64 100, i1 false)
   ret void
 }
 
@@ -203,7 +198,6 @@ define void @memcpy_alloca_dst(i8* %src) {
 ; BULK-MEM-NEXT: return
 define void @memset_alloca(i8 %val) {
   %a = alloca [100 x i8]
-  %p = bitcast [100 x i8]* %a to i8*
-  call void @llvm.memset.p0i8.i64(i8* %p, i8 %val, i64 100, i1 false)
+  call void @llvm.memset.p0.i64(ptr %a, i8 %val, i64 100, i1 false)
   ret void
 }
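
The hunks above show the general pattern for the memory intrinsics: under
opaque pointers the overload suffix encodes only the address spaces and the
length type, not the pointee type, so llvm.memcpy.p0i8.p0i8.i64 becomes
llvm.memcpy.p0.p0.i64 and the bitcasts that fed i8* arguments fold away
entirely. A minimal before/after sketch (the function name is illustrative):

    ; typed pointers: pointee types in the suffix, bitcast required
    define void @copy_from_alloca(i8* %dst) {
      %a = alloca [100 x i8]
      %p = bitcast [100 x i8]* %a to i8*
      call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %p, i64 100, i1 false)
      ret void
    }

    ; opaque pointers: only address spaces remain, the alloca is passed directly
    define void @copy_from_alloca(ptr %dst) {
      %a = alloca [100 x i8]
      call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %a, i64 100, i1 false)
      ret void
    }
    declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)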

diff --git a/llvm/test/CodeGen/WebAssembly/byval.ll b/llvm/test/CodeGen/WebAssembly/byval.ll
index 5a42f3b9438a..7fdf59f95c4c 100644
--- a/llvm/test/CodeGen/WebAssembly/byval.ll
+++ b/llvm/test/CodeGen/WebAssembly/byval.ll
@@ -9,15 +9,15 @@ target triple = "wasm32-unknown-unknown"
 %BigStruct = type { double, double, double, double, double, double, double, double, double, double, double, i8, i8, i8 }
 %EmptyStruct = type { }
 
-declare void @ext_func(%SmallStruct*)
-declare void @ext_func_empty(%EmptyStruct* byval(%EmptyStruct))
-declare void @ext_byval_func(%SmallStruct* byval(%SmallStruct))
-declare void @ext_byval_func_align8(%SmallStruct* byval(%SmallStruct) align 8)
-declare void @ext_byval_func_alignedstruct(%AlignedStruct* byval(%AlignedStruct))
-declare void @ext_byval_func_empty(%EmptyStruct* byval(%EmptyStruct))
+declare void @ext_func(ptr)
+declare void @ext_func_empty(ptr byval(%EmptyStruct))
+declare void @ext_byval_func(ptr byval(%SmallStruct))
+declare void @ext_byval_func_align8(ptr byval(%SmallStruct) align 8)
+declare void @ext_byval_func_alignedstruct(ptr byval(%AlignedStruct))
+declare void @ext_byval_func_empty(ptr byval(%EmptyStruct))
 
 ; CHECK-LABEL: byval_arg:
-define void @byval_arg(%SmallStruct* %ptr) {
+define void @byval_arg(ptr %ptr) {
  ; CHECK: .functype byval_arg (i32) -> ()
  ; Subtract 16 from SP (SP is 16-byte aligned)
  ; CHECK-NEXT: global.get $push[[L2:.+]]=, __stack_pointer
@@ -33,7 +33,7 @@ define void @byval_arg(%SmallStruct* %ptr) {
  ; CHECK-NEXT: i32.const $push[[L5:.+]]=, 12{{$}}
  ; CHECK-NEXT: i32.add $push[[ARG:.+]]=, $[[SP]], $pop[[L5]]{{$}}
  ; CHECK-NEXT: call ext_byval_func, $pop[[ARG]]{{$}}
- call void @ext_byval_func(%SmallStruct* byval(%SmallStruct) %ptr)
+ call void @ext_byval_func(ptr byval(%SmallStruct) %ptr)
  ; Restore the stack
  ; CHECK-NEXT: i32.const $push[[L6:.+]]=, 16
  ; CHECK-NEXT: i32.add $push[[L8:.+]]=, $[[SP]], $pop[[L6]]
@@ -43,7 +43,7 @@ define void @byval_arg(%SmallStruct* %ptr) {
 }
 
 ; CHECK-LABEL: byval_arg_align8:
-define void @byval_arg_align8(%SmallStruct* %ptr) {
+define void @byval_arg_align8(ptr %ptr) {
  ; CHECK: .functype byval_arg_align8 (i32) -> ()
  ; Don't check the entire SP sequence, just enough to get the alignment.
  ; CHECK: i32.const $push[[L1:.+]]=, 16
@@ -57,12 +57,12 @@ define void @byval_arg_align8(%SmallStruct* %ptr) {
  ; CHECK-NEXT: i32.const $push[[L5:.+]]=, 8{{$}}
  ; CHECK-NEXT: i32.add $push[[ARG:.+]]=, $[[SP]], $pop[[L5]]{{$}}
  ; CHECK-NEXT: call ext_byval_func_align8, $pop[[ARG]]{{$}}
- call void @ext_byval_func_align8(%SmallStruct* byval(%SmallStruct) align 8 %ptr)
+ call void @ext_byval_func_align8(ptr byval(%SmallStruct) align 8 %ptr)
  ret void
 }
 
 ; CHECK-LABEL: byval_arg_double:
-define void @byval_arg_double(%AlignedStruct* %ptr) {
+define void @byval_arg_double(ptr %ptr) {
  ; CHECK: .functype byval_arg_double (i32) -> ()
  ; Subtract 16 from SP (SP is 16-byte aligned)
  ; CHECK: i32.const $push[[L1:.+]]=, 16
@@ -75,32 +75,32 @@ define void @byval_arg_double(%AlignedStruct* %ptr) {
  ; CHECK-NEXT: i64.store 0($[[SP]]), $pop[[L4]]
  ; Pass a pointer to the stack slot to the function
  ; CHECK-NEXT: call ext_byval_func_alignedstruct, $[[SP]]
- tail call void @ext_byval_func_alignedstruct(%AlignedStruct* byval(%AlignedStruct) %ptr)
+ tail call void @ext_byval_func_alignedstruct(ptr byval(%AlignedStruct) %ptr)
  ret void
 }
 
 ; CHECK-LABEL: byval_param:
-define void @byval_param(%SmallStruct* byval(%SmallStruct) align 32 %ptr) {
+define void @byval_param(ptr byval(%SmallStruct) align 32 %ptr) {
  ; CHECK: .functype byval_param (i32) -> ()
  ; %ptr is just a pointer to a struct, so pass it directly through
  ; CHECK: call ext_func, $0
- call void @ext_func(%SmallStruct* %ptr)
+ call void @ext_func(ptr %ptr)
  ret void
 }
 
 ; CHECK-LABEL: byval_empty_caller:
-define void @byval_empty_caller(%EmptyStruct* %ptr) {
+define void @byval_empty_caller(ptr %ptr) {
  ; CHECK: .functype byval_empty_caller (i32) -> ()
  ; CHECK: call ext_byval_func_empty, $0
- call void @ext_byval_func_empty(%EmptyStruct* byval(%EmptyStruct) %ptr)
+ call void @ext_byval_func_empty(ptr byval(%EmptyStruct) %ptr)
  ret void
 }
 
 ; CHECK-LABEL: byval_empty_callee:
-define void @byval_empty_callee(%EmptyStruct* byval(%EmptyStruct) %ptr) {
+define void @byval_empty_callee(ptr byval(%EmptyStruct) %ptr) {
  ; CHECK: .functype byval_empty_callee (i32) -> ()
  ; CHECK: call ext_func_empty, $0
- call void @ext_func_empty(%EmptyStruct* %ptr)
+ call void @ext_func_empty(ptr %ptr)
  ret void
 }
 
@@ -116,8 +116,8 @@ define void @byval_empty_callee(%EmptyStruct* byval(%EmptyStruct) %ptr) {
 ; CHECK-NEXT: local.tee      $push[[L9:.+]]=, $[[SP:.+]]=, $pop[[L11]]{{$}}
 ; CHECK-NEXT: call           big_byval_callee,
 %big = type [131072 x i8]
-declare void @big_byval_callee(%big* byval(%big) align 1)
-define void @big_byval(%big* byval(%big) align 1 %x) {
-  call void @big_byval_callee(%big* byval(%big) align 1 %x)
+declare void @big_byval_callee(ptr byval(%big) align 1)
+define void @big_byval(ptr byval(%big) align 1 %x) {
+  call void @big_byval_callee(ptr byval(%big) align 1 %x)
   ret void
 }
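
One detail worth calling out in byval.ll: byval is a typed attribute, so the
struct type does not disappear along with the pointer type. It moves from the
pointer into the attribute, and ABI lowering still knows the size and
alignment of the stack copy it has to make. The two declaration forms are
equivalent (with a hypothetical @takes_struct):

    declare void @takes_struct(%SmallStruct* byval(%SmallStruct) align 8)  ; typed-pointer form
    declare void @takes_struct(ptr byval(%SmallStruct) align 8)            ; opaque-pointer form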

diff --git a/llvm/test/CodeGen/WebAssembly/call-indirect.ll b/llvm/test/CodeGen/WebAssembly/call-indirect.ll
index d5bb749d8a1d..b88d0965a0d5 100644
--- a/llvm/test/CodeGen/WebAssembly/call-indirect.ll
+++ b/llvm/test/CodeGen/WebAssembly/call-indirect.ll
@@ -13,7 +13,7 @@ target triple = "wasm32-unknown-unknown"
 ; REF:        call_indirect __indirect_function_table, () -> ()
 ; NOREF:      call_indirect () -> ()
 ; CHECK-NEXT: end_function
-define void @call_indirect_void(void ()* %callee) {
+define void @call_indirect_void(ptr %callee) {
   call void %callee()
   ret void
 }
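
For indirect calls the signature used to live in the pointer type (void ()*);
with ptr it is carried entirely by the call instruction itself, which is why
the CHECK lines for the call_indirect type annotation are unchanged. A minimal
sketch with illustrative names:

    define i32 @dispatch(ptr %f, i32 %x) {
      ; the call site's function type, not %f's type, determines the
      ; call_indirect signature
      %r = call i32 %f(i32 %x)
      ret i32 %r
    }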

diff --git a/llvm/test/CodeGen/WebAssembly/call-pic.ll b/llvm/test/CodeGen/WebAssembly/call-pic.ll
index 5a5fd056849d..f6f24bdd0530 100644
--- a/llvm/test/CodeGen/WebAssembly/call-pic.ll
+++ b/llvm/test/CodeGen/WebAssembly/call-pic.ll
@@ -6,8 +6,8 @@ declare i32 @foo()
 declare i32 @bar()
 declare hidden i32 @hidden_function()
 
-@indirect_func = hidden global i32 ()* @foo
-@alias_func = hidden alias i32 (), i32 ()* @local_function
+@indirect_func = hidden global ptr @foo
+@alias_func = hidden alias i32 (), ptr @local_function
 
 define i32 @local_function() {
   ret i32 1
@@ -20,7 +20,7 @@ define void @call_indirect_func() {
 ; CHECK-NEXT: i32.add $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: i32.load $push[[L3:[0-9]+]]=, 0($pop[[L2]]){{$}}
 ; CHECK-NEXT: call_indirect $push[[L4:[0-9]+]]=, $pop[[L3]]{{$}}
-  %1 = load i32 ()*, i32 ()** @indirect_func, align 4
+  %1 = load ptr, ptr @indirect_func, align 4
   %call = call i32 %1()
   ret void
 }
@@ -45,16 +45,16 @@ define void @call_alias_func() {
   ret void
 }
 
-define i8* @get_function_address() {
+define ptr @get_function_address() {
 ; CHECK-LABEL: get_function_address:
 ; CHECK:       global.get $push[[L0:[0-9]+]]=, bar@GOT{{$}}
 ; CHECK-NEXT:  return $pop[[L0]]{{$}}
 ; CHECK-NEXT:  end_function{{$}}
 
-  ret i8* bitcast (i32 ()* @bar to i8*)
+  ret ptr @bar
 }
 
-define i8* @get_function_address_hidden() {
+define ptr @get_function_address_hidden() {
 ; CHECK-LABEL: get_function_address_hidden:
 ; CHECK:       global.get $push[[L0:[0-9]+]]=, __table_base{{$}}
 ; CHECK-NEXT:  i32.const $push[[L1:[0-9]+]]=, hidden_function@TBREL{{$}}
@@ -62,5 +62,5 @@ define i8* @get_function_address_hidden() {
 ; CHECK-NEXT:  return $pop[[L2]]{{$}}
 ; CHECK-NEXT:  end_function{{$}}
 
-  ret i8* bitcast (i32 ()* @hidden_function to i8*)
+  ret ptr @hidden_function
 }
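
Taking a function's address simplifies the same way: returning it as i8* used
to require a constant-expression bitcast, while with opaque pointers a
function address is already a ptr. Sketch (reusing @bar from above, with an
illustrative @addr):

    define ptr @addr() {
      ret ptr @bar   ; was: ret i8* bitcast (i32 ()* @bar to i8*)
    }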

diff --git a/llvm/test/CodeGen/WebAssembly/call.ll b/llvm/test/CodeGen/WebAssembly/call.ll
index 307e87d894e7..0fcc2bed2463 100644
--- a/llvm/test/CodeGen/WebAssembly/call.ll
+++ b/llvm/test/CodeGen/WebAssembly/call.ll
@@ -96,7 +96,7 @@ define i32 @call_i32_binary(i32 %a, i32 %b) {
 ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} call_indirect $pop[[L0]]{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @call_indirect_void(void ()* %callee) {
+define void @call_indirect_void(ptr %callee) {
   call void %callee()
   ret void
 }
@@ -106,7 +106,7 @@ define void @call_indirect_void(void ()* %callee) {
 ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i32 @call_indirect_i32(i32 ()* %callee) {
+define i32 @call_indirect_i32(ptr %callee) {
   %t = call i32 %callee()
   ret i32 %t
 }
@@ -116,7 +116,7 @@ define i32 @call_indirect_i32(i32 ()* %callee) {
 ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @call_indirect_i64(i64 ()* %callee) {
+define i64 @call_indirect_i64(ptr %callee) {
   %t = call i64 %callee()
   ret i64 %t
 }
@@ -126,7 +126,7 @@ define i64 @call_indirect_i64(i64 ()* %callee) {
 ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define float @call_indirect_float(float ()* %callee) {
+define float @call_indirect_float(ptr %callee) {
   %t = call float %callee()
   ret float %t
 }
@@ -136,7 +136,7 @@ define float @call_indirect_float(float ()* %callee) {
 ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define double @call_indirect_double(double ()* %callee) {
+define double @call_indirect_double(ptr %callee) {
   %t = call double %callee()
   ret double %t
 }
@@ -146,7 +146,7 @@ define double @call_indirect_double(double ()* %callee) {
 ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define <16 x i8> @call_indirect_v128(<16 x i8> ()* %callee) {
+define <16 x i8> @call_indirect_v128(ptr %callee) {
   %t = call <16 x i8> %callee()
   ret <16 x i8> %t
 }
@@ -157,7 +157,7 @@ define <16 x i8> @call_indirect_v128(<16 x i8> ()* %callee) {
 ; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: {{^}} call_indirect $pop[[L0]], $pop[[L1]]{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @call_indirect_arg(void (i32)* %callee, i32 %arg) {
+define void @call_indirect_arg(ptr %callee, i32 %arg) {
   call void %callee(i32 %arg)
   ret void
 }
@@ -170,7 +170,7 @@ define void @call_indirect_arg(void (i32)* %callee, i32 %arg) {
 ; CHECK-NEXT: {{^}} call_indirect $push[[NUM:[0-9]+]]=, $pop[[L0]], $pop[[L1]], $pop[[L2]]{{$}}
 ; CHECK-NEXT: drop $pop[[NUM]]{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @call_indirect_arg_2(i32 (i32, i32)* %callee, i32 %arg, i32 %arg2) {
+define void @call_indirect_arg_2(ptr %callee, i32 %arg, i32 %arg2) {
   call i32 %callee(i32 %arg, i32 %arg2)
   ret void
 }
@@ -223,13 +223,13 @@ declare void @vararg_func(...)
 declare void @other_void_nullary()
 define void @call_constexpr() {
 bb0:
-  call void bitcast (void (...)* @vararg_func to void (i32, i32)*)(i32 2, i32 3)
+  call void @vararg_func(i32 2, i32 3)
   br label %bb1
 bb1:
-  call void select (i1 0, void ()* @void_nullary, void ()* @other_void_nullary)()
+  call void select (i1 0, ptr @void_nullary, ptr @other_void_nullary)()
   br label %bb2
 bb2:
-  call void inttoptr (i32 ptrtoint (void ()* @void_nullary to i32) to void ()*)()
+  call void inttoptr (i32 ptrtoint (ptr @void_nullary to i32) to ptr)()
   ret void
 }
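
In call.ll only the pointee-type cast folds away: the varargs call through a
bitcast becomes a plain direct call, because the call site's function type
(void (i32, i32)) no longer conflicts with the callee's pointer type. Callees
built from genuine address computations, the select and the inttoptr/ptrtoint
round-trip, survive unchanged. Sketch of the folded case (with an illustrative
@f):

    declare void @vararg_func(...)
    define void @f() {
      ; was: call void bitcast (void (...)* @vararg_func to void (i32, i32)*)(i32 2, i32 3)
      call void @vararg_func(i32 2, i32 3)
      ret void
    }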
 

diff --git a/llvm/test/CodeGen/WebAssembly/cfg-stackify-eh.ll b/llvm/test/CodeGen/WebAssembly/cfg-stackify-eh.ll
index 06ec3c4f20b5..21a0949debc1 100644
--- a/llvm/test/CodeGen/WebAssembly/cfg-stackify-eh.ll
+++ b/llvm/test/CodeGen/WebAssembly/cfg-stackify-eh.ll
@@ -7,8 +7,8 @@
 
 target triple = "wasm32-unknown-unknown"
 
-@_ZTIi = external constant i8*
-@_ZTId = external constant i8*
+@_ZTIi = external constant ptr
+@_ZTId = external constant ptr
 
 %class.Object = type { i8 }
 %class.MyClass = type { i32 }
@@ -42,7 +42,7 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK:   end_block                                   # label[[L2]]:
 ; CHECK:   rethrow   0                                 # to caller
 ; CHECK: end_try                                       # label[[L1]]:
-define void @test0() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test0() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @foo()
           to label %try.cont unwind label %catch.dispatch
@@ -51,25 +51,25 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* bitcast (i8** @_ZTIi to i8*), i8* bitcast (i8** @_ZTId to i8*)]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr @_ZTIi, ptr @_ZTId]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+  %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
   %matches = icmp eq i32 %3, %4
   br i1 %matches, label %catch2, label %catch.fallthrough
 
 catch2:                                           ; preds = %catch.start
-  %5 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %5 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont
 
 catch.fallthrough:                                ; preds = %catch.start
-  %6 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTId to i8*))
+  %6 = call i32 @llvm.eh.typeid.for(ptr @_ZTId)
   %matches1 = icmp eq i32 %3, %6
   br i1 %matches1, label %catch, label %rethrow
 
 catch:                                            ; preds = %catch.fallthrough
-  %7 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %7 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont
 
@@ -133,7 +133,7 @@ try.cont:                                         ; preds = %catch, %catch2, %en
 ; CHECK:   end_block                                   # label[[L1]]:
 ; CHECK:   call  __cxa_end_catch
 ; CHECK: end_try
-define void @test1() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test1() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @foo()
           to label %try.cont11 unwind label %catch.dispatch
@@ -142,44 +142,42 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr @_ZTIi]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+  %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
   %matches = icmp eq i32 %3, %4
   br i1 %matches, label %catch, label %rethrow
 
 catch:                                            ; preds = %catch.start
-  %5 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
-  %6 = bitcast i8* %5 to i32*
-  %7 = load i32, i32* %6, align 4
+  %5 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
+  %6 = load i32, ptr %5, align 4
   invoke void @foo() [ "funclet"(token %1) ]
           to label %try.cont unwind label %catch.dispatch2
 
 catch.dispatch2:                                  ; preds = %catch
-  %8 = catchswitch within %1 [label %catch.start3] unwind label %ehcleanup9
+  %7 = catchswitch within %1 [label %catch.start3] unwind label %ehcleanup9
 
 catch.start3:                                     ; preds = %catch.dispatch2
-  %9 = catchpad within %8 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %10 = call i8* @llvm.wasm.get.exception(token %9)
-  %11 = call i32 @llvm.wasm.get.ehselector(token %9)
-  %12 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
-  %matches4 = icmp eq i32 %11, %12
+  %8 = catchpad within %7 [ptr @_ZTIi]
+  %9 = call ptr @llvm.wasm.get.exception(token %8)
+  %10 = call i32 @llvm.wasm.get.ehselector(token %8)
+  %11 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
+  %matches4 = icmp eq i32 %10, %11
   br i1 %matches4, label %catch6, label %rethrow5
 
 catch6:                                           ; preds = %catch.start3
-  %13 = call i8* @__cxa_begin_catch(i8* %10) [ "funclet"(token %9) ]
-  %14 = bitcast i8* %13 to i32*
-  %15 = load i32, i32* %14, align 4
-  invoke void @foo() [ "funclet"(token %9) ]
+  %12 = call ptr @__cxa_begin_catch(ptr %9) [ "funclet"(token %8) ]
+  %13 = load i32, ptr %12, align 4
+  invoke void @foo() [ "funclet"(token %8) ]
           to label %invoke.cont8 unwind label %ehcleanup
 
 invoke.cont8:                                     ; preds = %catch6
-  call void @__cxa_end_catch() [ "funclet"(token %9) ]
-  catchret from %9 to label %try.cont
+  call void @__cxa_end_catch() [ "funclet"(token %8) ]
+  catchret from %8 to label %try.cont
 
 rethrow5:                                         ; preds = %catch.start3
-  invoke void @llvm.wasm.rethrow() [ "funclet"(token %9) ]
+  invoke void @llvm.wasm.rethrow() [ "funclet"(token %8) ]
           to label %unreachable unwind label %ehcleanup9
 
 try.cont:                                         ; preds = %invoke.cont8, %catch
@@ -194,14 +192,14 @@ try.cont11:                                       ; preds = %try.cont, %entry
   ret void
 
 ehcleanup:                                        ; preds = %catch6
-  %16 = cleanuppad within %9 []
-  call void @__cxa_end_catch() [ "funclet"(token %16) ]
-  cleanupret from %16 unwind label %ehcleanup9
+  %14 = cleanuppad within %8 []
+  call void @__cxa_end_catch() [ "funclet"(token %14) ]
+  cleanupret from %14 unwind label %ehcleanup9
 
 ehcleanup9:                                       ; preds = %ehcleanup, %rethrow5, %catch.dispatch2
-  %17 = cleanuppad within %1 []
-  call void @__cxa_end_catch() [ "funclet"(token %17) ]
-  cleanupret from %17 unwind to caller
+  %15 = cleanuppad within %1 []
+  call void @__cxa_end_catch() [ "funclet"(token %15) ]
+  cleanupret from %15 unwind to caller
 
 unreachable:                                      ; preds = %rethrow5
   unreachable
@@ -245,7 +243,7 @@ unreachable:                                      ; preds = %rethrow5
 ; CHECK:     br        0                               # 0: up to label[[L0]]
 ; CHECK:   end_loop
 ; CHECK: end_try                                       # label[[L3]]:
-define void @test2() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test2() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @foo()
           to label %try.cont unwind label %catch.dispatch
@@ -254,10 +252,10 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %4 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %catch.start
@@ -311,7 +309,7 @@ terminate:                                        ; preds = %ehcleanup
 ; NOOPT:   call      bar
 ; NOOPT: catch     {{.*}}
 ; NOOPT: end_try
-define void @test3() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test3() personality ptr @__gxx_wasm_personality_v0 {
 bb0:
   br i1 undef, label %bb1, label %bb2
 
@@ -333,8 +331,8 @@ catch.dispatch:                                   ; preds = %bb4, %bb3
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
   catchret from %1 to label %try.cont
 
@@ -351,13 +349,13 @@ try.cont:                                         ; preds = %catch.start, %bb4,
 ; CHECK:   catch
 ; CHECK:   end_try
 ; CHECK: end_loop
-define void @test4(i32* %p) personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test4(ptr %p) personality ptr @__gxx_wasm_personality_v0 {
 entry:
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   br label %loop
 
 loop:                                             ; preds = %try.cont, %entry
-  store volatile i32 1, i32* %p
+  store volatile i32 1, ptr %p
   invoke void @foo()
           to label %try.cont unwind label %catch.dispatch
 
@@ -365,8 +363,8 @@ catch.dispatch:                                   ; preds = %loop
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
   catchret from %1 to label %try.cont
 
@@ -409,7 +407,7 @@ try.cont:                                         ; preds = %catch.start, %loop
 ; NOSORT: end_try
 ; NOSORT: return
 
-define void @test5() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test5() personality ptr @__gxx_wasm_personality_v0 {
 bb0:
   invoke void @foo()
           to label %bb1 unwind label %catch.dispatch0
@@ -422,8 +420,8 @@ catch.dispatch0:                                  ; preds = %bb0
   %0 = catchswitch within none [label %catch.start0] unwind to caller
 
 catch.start0:                                     ; preds = %catch.dispatch0
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
   catchret from %1 to label %try.cont
 
@@ -431,8 +429,8 @@ catch.dispatch1:                                  ; preds = %bb1
   %4 = catchswitch within none [label %catch.start1] unwind to caller
 
 catch.start1:                                     ; preds = %catch.dispatch1
-  %5 = catchpad within %4 [i8* null]
-  %6 = call i8* @llvm.wasm.get.exception(token %5)
+  %5 = catchpad within %4 [ptr null]
+  %6 = call ptr @llvm.wasm.get.exception(token %5)
   %7 = call i32 @llvm.wasm.get.ehselector(token %5)
   catchret from %5 to label %try.cont
 
@@ -464,7 +462,7 @@ try.cont:                                         ; preds = %catch.start1, %catc
 ; NOSORT:   return
 ; NOSORT: end_try
 
-define void @test6() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test6() personality ptr @__gxx_wasm_personality_v0 {
 bb0:
   invoke void @foo()
           to label %bb1 unwind label %catch.dispatch0
@@ -479,8 +477,8 @@ catch.dispatch0:                                  ; preds = %bb0
   %0 = catchswitch within none [label %catch.start0] unwind to caller
 
 catch.start0:                                     ; preds = %catch.dispatch0
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
   catchret from %1 to label %try.cont
 
@@ -516,7 +514,7 @@ try.cont:                                         ; preds = %catch.start0
 ; NOSORT: end_try
 ; NOSORT: return
 
-define void @test7() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test7() personality ptr @__gxx_wasm_personality_v0 {
 bb0:
   invoke void @foo()
           to label %bb1 unwind label %catch.dispatch0
@@ -530,8 +528,8 @@ catch.dispatch0:                                  ; preds = %bb0
   %0 = catchswitch within none [label %catch.start0] unwind to caller
 
 catch.start0:                                     ; preds = %catch.dispatch0
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
   catchret from %1 to label %try.cont
 
@@ -539,8 +537,8 @@ catch.dispatch1:                                  ; preds = %bb1
   %4 = catchswitch within none [label %catch.start1] unwind to caller
 
 catch.start1:                                     ; preds = %catch.dispatch1
-  %5 = catchpad within %4 [i8* null]
-  %6 = call i8* @llvm.wasm.get.exception(token %5)
+  %5 = catchpad within %4 [ptr null]
+  %6 = call ptr @llvm.wasm.get.exception(token %5)
   %7 = call i32 @llvm.wasm.get.ehselector(token %5)
   catchret from %5 to label %try.cont
 
@@ -571,7 +569,7 @@ try.cont:                                         ; preds = %catch.start1, %catc
 ; NOSORT:   return
 ; NOSORT: end_try
 
-define i32 @test8() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define i32 @test8() personality ptr @__gxx_wasm_personality_v0 {
 bb0:
   invoke void @foo()
           to label %bb1 unwind label %catch.dispatch0
@@ -584,8 +582,8 @@ catch.dispatch0:                                  ; preds = %bb0
   %1 = catchswitch within none [label %catch.start0] unwind to caller
 
 catch.start0:                                     ; preds = %catch.dispatch0
-  %2 = catchpad within %1 [i8* null]
-  %3 = call i8* @llvm.wasm.get.exception(token %2)
+  %2 = catchpad within %1 [ptr null]
+  %3 = call ptr @llvm.wasm.get.exception(token %2)
   %j = call i32 @llvm.wasm.get.ehselector(token %2)
   catchret from %2 to label %try.cont
 
@@ -597,7 +595,7 @@ try.cont:                                         ; preds = %catch.start0
 ; unstackified in fixCallUnwindMismatches in CFGStackify.
 
 ; NOSORT-LOCALS-LABEL: test9:
-define void @test9(i32 %x) personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test9(i32 %x) personality ptr @__gxx_wasm_personality_v0 {
 bb0:
   invoke void @foo()
           to label %bb1 unwind label %catch.dispatch0
@@ -610,11 +608,11 @@ bb1:                                              ; preds = %bb0
   ; But when we introduce a nested try-delegate in fixCallUnwindMismatches in
   ; CFGStackify, it is possible that we end up unstackifying the first dest
   ; register. In that case, we convert that tee into a copy.
-  %addr = inttoptr i32 %t to i32*
-  %load = load i32, i32* %addr
+  %addr = inttoptr i32 %t to ptr
+  %load = load i32, ptr %addr
   %call = call i32 @baz()
   %add = add i32 %load, %call
-  store i32 %add, i32* %addr
+  store i32 %add, ptr %addr
   ret void
 ; NOSORT-LOCALS:       i32.add
 ; NOSORT-LOCALS-NOT:   local.tee
@@ -624,8 +622,8 @@ catch.dispatch0:                                  ; preds = %bb0
   %0 = catchswitch within none [label %catch.start0] unwind to caller
 
 catch.start0:                                     ; preds = %catch.dispatch0
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
   catchret from %1 to label %try.cont
 
@@ -669,7 +667,7 @@ try.cont:                                         ; preds = %catch.start0
 ; NOSORT: end_try
 ; NOSORT: return
 
-define void @test10() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test10() personality ptr @__gxx_wasm_personality_v0 {
 bb0:
   invoke void @foo()
           to label %bb1 unwind label %catch.dispatch0
@@ -682,10 +680,10 @@ catch.dispatch0:                                  ; preds = %bb0
   %0 = catchswitch within none [label %catch.start0] unwind to caller
 
 catch.start0:                                     ; preds = %catch.dispatch0
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %4 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont
 
@@ -693,10 +691,10 @@ catch.dispatch1:                                  ; preds = %bb1
   %5 = catchswitch within none [label %catch.start1] unwind to caller
 
 catch.start1:                                     ; preds = %catch.dispatch1
-  %6 = catchpad within %5 [i8* null]
-  %7 = call i8* @llvm.wasm.get.exception(token %6)
+  %6 = catchpad within %5 [ptr null]
+  %7 = call ptr @llvm.wasm.get.exception(token %6)
   %8 = call i32 @llvm.wasm.get.ehselector(token %6)
-  %9 = call i8* @__cxa_begin_catch(i8* %7) [ "funclet"(token %6) ]
+  %9 = call ptr @__cxa_begin_catch(ptr %7) [ "funclet"(token %6) ]
   call void @__cxa_end_catch() [ "funclet"(token %6) ]
   catchret from %6 to label %try.cont
 
@@ -720,7 +718,7 @@ try.cont:                                         ; preds = %catch.start1, %catc
 ; NOOPT:   call      foo
 ; NOOPT: end_block
 ; NOOPT: return
-define void @test11(i32 %arg) personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test11(i32 %arg) personality ptr @__gxx_wasm_personality_v0 {
 entry:
   %tobool = icmp ne i32 %arg, 0
   br i1 %tobool, label %if.then, label %if.end
@@ -729,10 +727,10 @@ catch.dispatch:                                   ; preds = %if.then
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %4 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %if.end
 
@@ -765,22 +763,22 @@ if.end:                                           ; preds = %cont, %catch.start,
 ; NOSORT: catch_all
 ; NOSORT:   rethrow 0
 ; NOSORT: end_try
-define void @test12(i8* %a, i8* %b) personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test12(ptr %a, ptr %b) personality ptr @__gxx_wasm_personality_v0 {
 entry:
   %o = alloca %class.Object, align 1
   invoke void @foo()
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %b, i32 100, i1 false)
-  call void @llvm.memmove.p0i8.p0i8.i32(i8* %a, i8* %b, i32 100, i1 false)
-  call void @llvm.memset.p0i8.i32(i8* %a, i8 0, i32 100, i1 false)
-  %call = call %class.Object* @_ZN6ObjectD2Ev(%class.Object* %o)
+  call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr %b, i32 100, i1 false)
+  call void @llvm.memmove.p0.p0.i32(ptr %a, ptr %b, i32 100, i1 false)
+  call void @llvm.memset.p0.i32(ptr %a, i8 0, i32 100, i1 false)
+  %call = call ptr @_ZN6ObjectD2Ev(ptr %o)
   ret void
 
 ehcleanup:                                        ; preds = %entry
   %0 = cleanuppad within none []
-  %call2 = call %class.Object* @_ZN6ObjectD2Ev(%class.Object* %o) [ "funclet"(token %0) ]
+  %call2 = call ptr @_ZN6ObjectD2Ev(ptr %o) [ "funclet"(token %0) ]
   cleanupret from %0 unwind to caller
 }
 
@@ -793,7 +791,7 @@ ehcleanup:                                        ; preds = %entry
 ; CHECK: try
 ; CHECK: call      $push{{.*}}=, nothrow_i32
 ; CHECK: call      fun, $pop{{.*}}
-define void @test13() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test13() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   %call = call i32 @nothrow_i32()
   invoke void @fun(i32 %call)
@@ -811,7 +809,7 @@ terminate:                                        ; preds = %entry
 ; This crashed in debug mode (i.e., when NDEBUG is not defined) when the logic
 ; for computing the innermost region was incorrect and a loop region contained
 ; an exception region. This should pass CFGSort without crashing.
-define void @test14() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test14() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   %e = alloca %class.MyClass, align 4
   br label %for.cond
@@ -829,25 +827,23 @@ catch.dispatch:                                   ; preds = %for.body
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* bitcast ({ i8*, i8* }* @_ZTI7MyClass to i8*)]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr @_ZTI7MyClass]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i32 @llvm.eh.typeid.for(i8* bitcast ({ i8*, i8* }* @_ZTI7MyClass to i8*))
+  %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTI7MyClass)
   %matches = icmp eq i32 %3, %4
   br i1 %matches, label %catch, label %rethrow
 
 catch:                                            ; preds = %catch.start
-  %5 = call i8* @__cxa_get_exception_ptr(i8* %2) [ "funclet"(token %1) ]
-  %6 = bitcast i8* %5 to %class.MyClass*
-  %call = call %class.MyClass* @_ZN7MyClassC2ERKS_(%class.MyClass* %e, %class.MyClass* dereferenceable(4) %6) [ "funclet"(token %1) ]
-  %7 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
-  %x = getelementptr inbounds %class.MyClass, %class.MyClass* %e, i32 0, i32 0
-  %8 = load i32, i32* %x, align 4
-  invoke void @quux(i32 %8) [ "funclet"(token %1) ]
+  %5 = call ptr @__cxa_get_exception_ptr(ptr %2) [ "funclet"(token %1) ]
+  %call = call ptr @_ZN7MyClassC2ERKS_(ptr %e, ptr dereferenceable(4) %5) [ "funclet"(token %1) ]
+  %6 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
+  %7 = load i32, ptr %e, align 4
+  invoke void @quux(i32 %7) [ "funclet"(token %1) ]
           to label %invoke.cont2 unwind label %ehcleanup
 
 invoke.cont2:                                     ; preds = %catch
-  %call3 = call %class.MyClass* @_ZN7MyClassD2Ev(%class.MyClass* %e) [ "funclet"(token %1) ]
+  %call3 = call ptr @_ZN7MyClassD2Ev(ptr %e) [ "funclet"(token %1) ]
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %for.inc
 
@@ -860,20 +856,20 @@ for.inc:                                          ; preds = %invoke.cont2, %for.
   br label %for.cond
 
 ehcleanup:                                        ; preds = %catch
-  %9 = cleanuppad within %1 []
-  %call4 = call %class.MyClass* @_ZN7MyClassD2Ev(%class.MyClass* %e) [ "funclet"(token %9) ]
-  invoke void @__cxa_end_catch() [ "funclet"(token %9) ]
+  %8 = cleanuppad within %1 []
+  %call4 = call ptr @_ZN7MyClassD2Ev(ptr %e) [ "funclet"(token %8) ]
+  invoke void @__cxa_end_catch() [ "funclet"(token %8) ]
           to label %invoke.cont6 unwind label %terminate7
 
 invoke.cont6:                                     ; preds = %ehcleanup
-  cleanupret from %9 unwind to caller
+  cleanupret from %8 unwind to caller
 
 for.end:                                          ; preds = %for.cond
   ret void
 
 terminate7:                                       ; preds = %ehcleanup
-  %10 = cleanuppad within %9 []
-  call void @_ZSt9terminatev() [ "funclet"(token %10) ]
+  %9 = cleanuppad within %8 []
+  call void @_ZSt9terminatev() [ "funclet"(token %9) ]
   unreachable
 }
 
@@ -891,7 +887,7 @@ terminate7:                                       ; preds = %ehcleanup
 ; bb2:            <- Continuation BB
 ;   end
 ; CHECK-LABEL: test15:
-define void @test15(i32 %n) personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test15(i32 %n) personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @foo()
           to label %for.body unwind label %catch.dispatch
@@ -917,10 +913,10 @@ catch.dispatch:                                   ; preds = %for.body, %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %4 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont
 
@@ -960,7 +956,7 @@ try.cont:                                         ; preds = %catch.start, %for.e
 ;   end
 ;
 ; CHECK-LABEL: test16:
-define void @test16() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test16() personality ptr @__gxx_wasm_personality_v0 {
 ; CHECK: call foo
 entry:
   invoke void @foo()
@@ -977,10 +973,10 @@ catch.dispatch:                                   ; preds = %invoke.cont
 
 ; CHECK: catch
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %4 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   invoke void @__cxa_end_catch() [ "funclet"(token %1) ]
           to label %invoke.cont2 unwind label %catch.dispatch3
 
@@ -988,10 +984,10 @@ catch.dispatch3:                                  ; preds = %catch.start, %catch
   %5 = catchswitch within none [label %catch.start4] unwind to caller
 
 catch.start4:                                     ; preds = %catch.dispatch3
-  %6 = catchpad within %5 [i8* null]
-  %7 = call i8* @llvm.wasm.get.exception(token %6)
+  %6 = catchpad within %5 [ptr null]
+  %7 = call ptr @llvm.wasm.get.exception(token %6)
   %8 = call i32 @llvm.wasm.get.ehselector(token %6)
-  %9 = call i8* @__cxa_begin_catch(i8* %7) [ "funclet"(token %6) ]
+  %9 = call ptr @__cxa_begin_catch(ptr %7) [ "funclet"(token %6) ]
   call void @__cxa_end_catch() [ "funclet"(token %6) ]
   catchret from %6 to label %try.cont8
 
@@ -1012,7 +1008,7 @@ invoke.cont2:                                     ; preds = %catch.start
 ; NOSORT: try
 ; NOSORT: end_try
 ; NOSORT: end_loop
-define void @test17(i32 %n) personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test17(i32 %n) personality ptr @__gxx_wasm_personality_v0 {
 entry:
   br label %while.cond
 
@@ -1030,10 +1026,10 @@ catch.dispatch:                                   ; preds = %while.body
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %4 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   invoke void @__cxa_end_catch() [ "funclet"(token %1) ]
           to label %invoke.cont unwind label %ehcleanup
 
@@ -1063,7 +1059,7 @@ while.end:                                        ; preds = %while.body, %while.
 ; NOSORT: catch_all
 ; NOSORT: end_try
 ; NOSORT-NEXT: end_function
-define i32 @test18(i32 %n) personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define i32 @test18(i32 %n) personality ptr @__gxx_wasm_personality_v0 {
 entry:
   %t = alloca %class.Object, align 1
   br label %for.cond
@@ -1083,7 +1079,7 @@ if.then:                                          ; preds = %for.body
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %if.then
-  %call2 = call %class.Object* @_ZN6ObjectD2Ev(%class.Object* %t)
+  %call2 = call ptr @_ZN6ObjectD2Ev(ptr %t)
   ret i32 %call
 
 for.inc:                                          ; preds = %for.body
@@ -1092,7 +1088,7 @@ for.inc:                                          ; preds = %for.body
 
 ehcleanup:                                        ; preds = %if.then
   %0 = cleanuppad within none []
-  %call3 = call %class.Object* @_ZN6ObjectD2Ev(%class.Object* %t) [ "funclet"(token %0) ]
+  %call3 = call ptr @_ZN6ObjectD2Ev(ptr %t) [ "funclet"(token %0) ]
   cleanupret from %0 unwind to caller
 }
 
@@ -1109,7 +1105,7 @@ ehcleanup:                                        ; preds = %if.then
 ; CHECK:    call  quux, $[[RET]]
 ; CHECK: catch_all
 ; CHECK: end_try
-define void @test19() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test19() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   %call = call i32 @baz()
   invoke void @quux(i32 %call)
@@ -1154,7 +1150,7 @@ invoke.cont:                                      ; preds = %entry
 ; NOSORT-LABEL: test20:
 ; NOSORT: try
 ; NOSORT:   br_if   0
-define void @test20(i1 %arg) personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test20(i1 %arg) personality ptr @__gxx_wasm_personality_v0 {
 entry:
   br i1 %arg, label %bb0, label %dest
 
@@ -1170,8 +1166,8 @@ catch.dispatch0:                                  ; preds = %bb0
   %0 = catchswitch within none [label %catch.start0] unwind to caller
 
 catch.start0:                                     ; preds = %catch.dispatch0
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
   catchret from %1 to label %try.cont
 
@@ -1182,8 +1178,8 @@ catch.dispatch1:                                  ; preds = %bb1
   %4 = catchswitch within none [label %catch.start1] unwind to caller
 
 catch.start1:                                     ; preds = %catch.dispatch1
-  %5 = catchpad within %4 [i8* null]
-  %6 = call i8* @llvm.wasm.get.exception(token %5)
+  %5 = catchpad within %4 [ptr null]
+  %6 = call ptr @llvm.wasm.get.exception(token %5)
   %7 = call i32 @llvm.wasm.get.ehselector(token %5)
   catchret from %5 to label %try.cont
 
@@ -1218,20 +1214,20 @@ try.cont:                                         ; preds = %catch.start1, %catc
 ;           <- (b) The br destination should be remapped to here
 ;
 ; The test was reduced by bugpoint and should not crash in CFGStackify.
-define void @test21() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test21() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   br i1 undef, label %if.then, label %if.end12
 
 if.then:                                          ; preds = %entry
-  invoke void @__cxa_throw(i8* null, i8* null, i8* null) #1
+  invoke void @__cxa_throw(ptr null, ptr null, ptr null) #1
           to label %unreachable unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %if.then
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr @_ZTIi]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
   catchret from %1 to label %catchret.dest
 
@@ -1240,15 +1236,15 @@ catchret.dest:                                    ; preds = %catch.start
           to label %invoke.cont unwind label %catch.dispatch4
 
 invoke.cont:                                      ; preds = %catchret.dest
-  invoke void @__cxa_throw(i8* null, i8* null, i8* null) #1
+  invoke void @__cxa_throw(ptr null, ptr null, ptr null) #1
           to label %unreachable unwind label %catch.dispatch4
 
 catch.dispatch4:                                  ; preds = %invoke.cont, %catchret.dest
   %4 = catchswitch within none [label %catch.start5] unwind to caller
 
 catch.start5:                                     ; preds = %catch.dispatch4
-  %5 = catchpad within %4 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %6 = call i8* @llvm.wasm.get.exception(token %5)
+  %5 = catchpad within %4 [ptr @_ZTIi]
+  %6 = call ptr @llvm.wasm.get.exception(token %5)
   %7 = call i32 @llvm.wasm.get.ehselector(token %5)
   unreachable
 
@@ -1260,8 +1256,8 @@ catch.dispatch16:                                 ; preds = %if.end12
   %8 = catchswitch within none [label %catch.start17] unwind label %ehcleanup
 
 catch.start17:                                    ; preds = %catch.dispatch16
-  %9 = catchpad within %8 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %10 = call i8* @llvm.wasm.get.exception(token %9)
+  %9 = catchpad within %8 [ptr @_ZTIi]
+  %10 = call ptr @llvm.wasm.get.exception(token %9)
   %11 = call i32 @llvm.wasm.get.ehselector(token %9)
   br i1 undef, label %catch20, label %rethrow19
 
@@ -1296,7 +1292,7 @@ unreachable:                                      ; preds = %rethrow19, %invoke.
 ; Regression test for WasmEHFuncInfo's reverse mapping bug. 'UnwindDestToSrc'
 ; should return a vector and not a single BB, which was incorrect.
 ; This was reduced by bugpoint and should not crash in CFGStackify.
-define void @test22() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test22() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @foo()
           to label %invoke.cont unwind label %catch.dispatch
@@ -1305,18 +1301,18 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind label %ehcleanup22
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr @_ZTIi]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  invoke void @__cxa_throw(i8* null, i8* null, i8* null) #1 [ "funclet"(token %1) ]
+  invoke void @__cxa_throw(ptr null, ptr null, ptr null) #1 [ "funclet"(token %1) ]
           to label %unreachable unwind label %catch.dispatch2
 
 catch.dispatch2:                                  ; preds = %catch.start
   %4 = catchswitch within %1 [label %catch.start3] unwind label %ehcleanup
 
 catch.start3:                                     ; preds = %catch.dispatch2
-  %5 = catchpad within %4 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %6 = call i8* @llvm.wasm.get.exception(token %5)
+  %5 = catchpad within %4 [ptr @_ZTIi]
+  %6 = call ptr @llvm.wasm.get.exception(token %5)
   %7 = call i32 @llvm.wasm.get.ehselector(token %5)
   catchret from %5 to label %try.cont
 
@@ -1325,15 +1321,15 @@ try.cont:                                         ; preds = %catch.start3
           to label %invoke.cont8 unwind label %ehcleanup
 
 invoke.cont8:                                     ; preds = %try.cont
-  invoke void @__cxa_throw(i8* null, i8* null, i8* null) #1 [ "funclet"(token %1) ]
+  invoke void @__cxa_throw(ptr null, ptr null, ptr null) #1 [ "funclet"(token %1) ]
           to label %unreachable unwind label %catch.dispatch11
 
 catch.dispatch11:                                 ; preds = %invoke.cont8
   %8 = catchswitch within %1 [label %catch.start12] unwind label %ehcleanup
 
 catch.start12:                                    ; preds = %catch.dispatch11
-  %9 = catchpad within %8 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %10 = call i8* @llvm.wasm.get.exception(token %9)
+  %9 = catchpad within %8 [ptr @_ZTIi]
+  %10 = call ptr @llvm.wasm.get.exception(token %9)
   %11 = call i32 @llvm.wasm.get.ehselector(token %9)
   unreachable
 
@@ -1368,62 +1364,59 @@ unreachable:                                      ; preds = %invoke.cont8, %catc
 ; included in catch.start's exception. Also, after we take catch.start2's
 ; exception out of catch.start's exception, we have to take try.cont8 out of
 ; catch.start's exception, because it has a predecessor in catch.start2.
-define void @test23() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test23() personality ptr @__gxx_wasm_personality_v0 {
 entry:
-  %exception = call i8* @__cxa_allocate_exception(i32 4) #0
-  %0 = bitcast i8* %exception to i32*
-  store i32 0, i32* %0, align 16
-  invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #1
+  %exception = call ptr @__cxa_allocate_exception(i32 4) #0
+  store i32 0, ptr %exception, align 16
+  invoke void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) #1
           to label %unreachable unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %entry
-  %1 = catchswitch within none [label %catch.start] unwind label %catch.dispatch1
+  %0 = catchswitch within none [label %catch.start] unwind label %catch.dispatch1
 
 catch.start:                                      ; preds = %catch.dispatch
-  %2 = catchpad within %1 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %3 = call i8* @llvm.wasm.get.exception(token %2)
-  %4 = call i32 @llvm.wasm.get.ehselector(token %2)
-  %5 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #0
-  %matches = icmp eq i32 %4, %5
+  %1 = catchpad within %0 [ptr @_ZTIi]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
+  %3 = call i32 @llvm.wasm.get.ehselector(token %1)
+  %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #0
+  %matches = icmp eq i32 %3, %4
   br i1 %matches, label %catch, label %rethrow
 
 catch:                                            ; preds = %catch.start
-  %6 = call i8* @__cxa_begin_catch(i8* %3) #0 [ "funclet"(token %2) ]
-  %7 = bitcast i8* %6 to i32*
-  %8 = load i32, i32* %7, align 4
-  call void @__cxa_end_catch() #0 [ "funclet"(token %2) ]
-  catchret from %2 to label %catchret.dest
+  %5 = call ptr @__cxa_begin_catch(ptr %2) #0 [ "funclet"(token %1) ]
+  %6 = load i32, ptr %5, align 4
+  call void @__cxa_end_catch() #0 [ "funclet"(token %1) ]
+  catchret from %1 to label %catchret.dest
 
 catchret.dest:                                    ; preds = %catch
   br label %try.cont
 
 rethrow:                                          ; preds = %catch.start
-  invoke void @llvm.wasm.rethrow() #1 [ "funclet"(token %2) ]
+  invoke void @llvm.wasm.rethrow() #1 [ "funclet"(token %1) ]
           to label %unreachable unwind label %catch.dispatch1
 
 catch.dispatch1:                                  ; preds = %rethrow, %catch.dispatch
-  %9 = catchswitch within none [label %catch.start2] unwind to caller
+  %7 = catchswitch within none [label %catch.start2] unwind to caller
 
 catch.start2:                                     ; preds = %catch.dispatch1
-  %10 = catchpad within %9 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %11 = call i8* @llvm.wasm.get.exception(token %10)
-  %12 = call i32 @llvm.wasm.get.ehselector(token %10)
-  %13 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #0
-  %matches3 = icmp eq i32 %12, %13
+  %8 = catchpad within %7 [ptr @_ZTIi]
+  %9 = call ptr @llvm.wasm.get.exception(token %8)
+  %10 = call i32 @llvm.wasm.get.ehselector(token %8)
+  %11 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #0
+  %matches3 = icmp eq i32 %10, %11
   br i1 %matches3, label %catch5, label %rethrow4
 
 catch5:                                           ; preds = %catch.start2
-  %14 = call i8* @__cxa_begin_catch(i8* %11) #0 [ "funclet"(token %10) ]
-  %15 = bitcast i8* %14 to i32*
-  %16 = load i32, i32* %15, align 4
-  call void @__cxa_end_catch() #0 [ "funclet"(token %10) ]
-  catchret from %10 to label %catchret.dest7
+  %12 = call ptr @__cxa_begin_catch(ptr %9) #0 [ "funclet"(token %8) ]
+  %13 = load i32, ptr %12, align 4
+  call void @__cxa_end_catch() #0 [ "funclet"(token %8) ]
+  catchret from %8 to label %catchret.dest7
 
 catchret.dest7:                                   ; preds = %catch5
   br label %try.cont8
 
 rethrow4:                                         ; preds = %catch.start2
-  call void @llvm.wasm.rethrow() #1 [ "funclet"(token %10) ]
+  call void @llvm.wasm.rethrow() #1 [ "funclet"(token %8) ]
   unreachable
 
 try.cont8:                                        ; preds = %try.cont, %catchret.dest7
@@ -1449,8 +1442,7 @@ unreachable:                                      ; preds = %rethrow, %entry
 ; exception first, before taking out catch.start12's exception out of
 ; catch.start4's exception; otherwise we end up with an incorrect relationship
 ; of catch.start's exception > catch.start12's exception.
-define void @test24() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test24() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @foo()
           to label %invoke.cont unwind label %catch.dispatch
@@ -1467,17 +1459,16 @@ catch.dispatch11:                                 ; preds = %rethrow6, %catch.di
   %0 = catchswitch within none [label %catch.start12] unwind to caller
 
 catch.start12:                                    ; preds = %catch.dispatch11
-  %1 = catchpad within %0 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr @_ZTIi]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #0
+  %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #0
   %matches13 = icmp eq i32 %3, %4
   br i1 %matches13, label %catch15, label %rethrow14
 
 catch15:                                          ; preds = %catch.start12
-  %5 = call i8* @__cxa_begin_catch(i8* %2) #0 [ "funclet"(token %1) ]
-  %6 = bitcast i8* %5 to i32*
-  %7 = load i32, i32* %6, align 4
+  %5 = call ptr @__cxa_begin_catch(ptr %2) #0 [ "funclet"(token %1) ]
+  %6 = load i32, ptr %5, align 4
   call void @__cxa_end_catch() #0 [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont18
 
@@ -1486,47 +1477,45 @@ rethrow14:                                        ; preds = %catch.start12
   unreachable
 
 catch.dispatch3:                                  ; preds = %rethrow, %catch.dispatch
-  %8 = catchswitch within none [label %catch.start4] unwind label %catch.dispatch11
+  %7 = catchswitch within none [label %catch.start4] unwind label %catch.dispatch11
 
 catch.start4:                                     ; preds = %catch.dispatch3
-  %9 = catchpad within %8 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %10 = call i8* @llvm.wasm.get.exception(token %9)
-  %11 = call i32 @llvm.wasm.get.ehselector(token %9)
-  %12 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #0
-  %matches5 = icmp eq i32 %11, %12
+  %8 = catchpad within %7 [ptr @_ZTIi]
+  %9 = call ptr @llvm.wasm.get.exception(token %8)
+  %10 = call i32 @llvm.wasm.get.ehselector(token %8)
+  %11 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #0
+  %matches5 = icmp eq i32 %10, %11
   br i1 %matches5, label %catch7, label %rethrow6
 
 catch7:                                           ; preds = %catch.start4
-  %13 = call i8* @__cxa_begin_catch(i8* %10) #0 [ "funclet"(token %9) ]
-  %14 = bitcast i8* %13 to i32*
-  %15 = load i32, i32* %14, align 4
-  call void @__cxa_end_catch() #0 [ "funclet"(token %9) ]
-  catchret from %9 to label %try.cont18
+  %12 = call ptr @__cxa_begin_catch(ptr %9) #0 [ "funclet"(token %8) ]
+  %13 = load i32, ptr %12, align 4
+  call void @__cxa_end_catch() #0 [ "funclet"(token %8) ]
+  catchret from %8 to label %try.cont18
 
 rethrow6:                                         ; preds = %catch.start4
-  invoke void @llvm.wasm.rethrow() #1 [ "funclet"(token %9) ]
+  invoke void @llvm.wasm.rethrow() #1 [ "funclet"(token %8) ]
           to label %unreachable unwind label %catch.dispatch11
 
 catch.dispatch:                                   ; preds = %invoke.cont1, %invoke.cont, %entry
-  %16 = catchswitch within none [label %catch.start] unwind label %catch.dispatch3
+  %14 = catchswitch within none [label %catch.start] unwind label %catch.dispatch3
 
 catch.start:                                      ; preds = %catch.dispatch
-  %17 = catchpad within %16 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %18 = call i8* @llvm.wasm.get.exception(token %17)
-  %19 = call i32 @llvm.wasm.get.ehselector(token %17)
-  %20 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #0
-  %matches = icmp eq i32 %19, %20
+  %15 = catchpad within %14 [ptr @_ZTIi]
+  %16 = call ptr @llvm.wasm.get.exception(token %15)
+  %17 = call i32 @llvm.wasm.get.ehselector(token %15)
+  %18 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #0
+  %matches = icmp eq i32 %17, %18
   br i1 %matches, label %catch, label %rethrow
 
 catch:                                            ; preds = %catch.start
-  %21 = call i8* @__cxa_begin_catch(i8* %18) #0 [ "funclet"(token %17) ]
-  %22 = bitcast i8* %21 to i32*
-  %23 = load i32, i32* %22, align 4
-  call void @__cxa_end_catch() #0 [ "funclet"(token %17) ]
-  catchret from %17 to label %try.cont18
+  %19 = call ptr @__cxa_begin_catch(ptr %16) #0 [ "funclet"(token %15) ]
+  %20 = load i32, ptr %19, align 4
+  call void @__cxa_end_catch() #0 [ "funclet"(token %15) ]
+  catchret from %15 to label %try.cont18
 
 rethrow:                                          ; preds = %catch.start
-  invoke void @llvm.wasm.rethrow() #1 [ "funclet"(token %17) ]
+  invoke void @llvm.wasm.rethrow() #1 [ "funclet"(token %15) ]
           to label %unreachable unwind label %catch.dispatch3
 
 try.cont18:                                       ; preds = %catch, %catch7, %catch15, %invoke.cont1
@@ -1556,56 +1545,53 @@ unreachable:                                      ; preds = %rethrow, %rethrow6
 ; contained in (a)'s exception. Because (a)'s unwind destination is (b), (b)'s
 ; exception is taken out of (a)'s. But because (c) is reachable from (b), we
 ; should make sure to take out (c)'s exception out of (a)'s exception too.
-define void @test25() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test25() personality ptr @__gxx_wasm_personality_v0 {
 entry:
-  %exception = call i8* @__cxa_allocate_exception(i32 4) #1
-  %0 = bitcast i8* %exception to i32*
-  store i32 0, i32* %0, align 16
-  invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #3
+  %exception = call ptr @__cxa_allocate_exception(i32 4) #1
+  store i32 0, ptr %exception, align 16
+  invoke void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) #3
           to label %unreachable unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %entry
-  %1 = catchswitch within none [label %catch.start] unwind label %catch.dispatch1
+  %0 = catchswitch within none [label %catch.start] unwind label %catch.dispatch1
 
 catch.start:                                      ; preds = %catch.dispatch
-  %2 = catchpad within %1 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %3 = call i8* @llvm.wasm.get.exception(token %2)
-  %4 = call i32 @llvm.wasm.get.ehselector(token %2)
-  %5 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #1
-  %matches = icmp eq i32 %4, %5
+  %1 = catchpad within %0 [ptr @_ZTIi]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
+  %3 = call i32 @llvm.wasm.get.ehselector(token %1)
+  %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #1
+  %matches = icmp eq i32 %3, %4
   br i1 %matches, label %catch, label %rethrow
 
 catch:                                            ; preds = %catch.start
-  %6 = call i8* @__cxa_begin_catch(i8* %3) #1 [ "funclet"(token %2) ]
-  %7 = bitcast i8* %6 to i32*
-  %8 = load i32, i32* %7, align 4
-  call void @__cxa_end_catch() #1 [ "funclet"(token %2) ]
-  catchret from %2 to label %try.cont8
+  %5 = call ptr @__cxa_begin_catch(ptr %2) #1 [ "funclet"(token %1) ]
+  %6 = load i32, ptr %5, align 4
+  call void @__cxa_end_catch() #1 [ "funclet"(token %1) ]
+  catchret from %1 to label %try.cont8
 
 rethrow:                                          ; preds = %catch.start
-  invoke void @llvm.wasm.rethrow() #3 [ "funclet"(token %2) ]
+  invoke void @llvm.wasm.rethrow() #3 [ "funclet"(token %1) ]
           to label %unreachable unwind label %catch.dispatch1
 
 catch.dispatch1:                                  ; preds = %rethrow, %catch.dispatch
-  %9 = catchswitch within none [label %catch.start2] unwind to caller
+  %7 = catchswitch within none [label %catch.start2] unwind to caller
 
 catch.start2:                                     ; preds = %catch.dispatch1
-  %10 = catchpad within %9 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %11 = call i8* @llvm.wasm.get.exception(token %10)
-  %12 = call i32 @llvm.wasm.get.ehselector(token %10)
-  %13 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #1
-  %matches3 = icmp eq i32 %12, %13
+  %8 = catchpad within %7 [ptr @_ZTIi]
+  %9 = call ptr @llvm.wasm.get.exception(token %8)
+  %10 = call i32 @llvm.wasm.get.ehselector(token %8)
+  %11 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #1
+  %matches3 = icmp eq i32 %10, %11
   br i1 %matches3, label %catch5, label %rethrow4
 
 catch5:                                           ; preds = %catch.start2
-  %14 = call i8* @__cxa_begin_catch(i8* %11) #1 [ "funclet"(token %10) ]
-  %15 = bitcast i8* %14 to i32*
-  %16 = load i32, i32* %15, align 4
-  call void @__cxa_end_catch() #1 [ "funclet"(token %10) ]
-  catchret from %10 to label %try.cont8
+  %12 = call ptr @__cxa_begin_catch(ptr %9) #1 [ "funclet"(token %8) ]
+  %13 = load i32, ptr %12, align 4
+  call void @__cxa_end_catch() #1 [ "funclet"(token %8) ]
+  catchret from %8 to label %try.cont8
 
 rethrow4:                                         ; preds = %catch.start2
-  call void @llvm.wasm.rethrow() #3 [ "funclet"(token %10) ]
+  call void @llvm.wasm.rethrow() #3 [ "funclet"(token %8) ]
   unreachable
 
 try.cont8:                                        ; preds = %catch, %catch5
@@ -1613,25 +1599,24 @@ try.cont8:                                        ; preds = %catch, %catch5
           to label %try.cont16 unwind label %catch.dispatch9
 
 catch.dispatch9:                                  ; preds = %try.cont8
-  %17 = catchswitch within none [label %catch.start10] unwind to caller
+  %14 = catchswitch within none [label %catch.start10] unwind to caller
 
 catch.start10:                                    ; preds = %catch.dispatch9
-  %18 = catchpad within %17 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %19 = call i8* @llvm.wasm.get.exception(token %18)
-  %20 = call i32 @llvm.wasm.get.ehselector(token %18)
-  %21 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #1
-  %matches11 = icmp eq i32 %20, %21
+  %15 = catchpad within %14 [ptr @_ZTIi]
+  %16 = call ptr @llvm.wasm.get.exception(token %15)
+  %17 = call i32 @llvm.wasm.get.ehselector(token %15)
+  %18 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #1
+  %matches11 = icmp eq i32 %17, %18
   br i1 %matches11, label %catch13, label %rethrow12
 
 catch13:                                          ; preds = %catch.start10
-  %22 = call i8* @__cxa_begin_catch(i8* %19) #1 [ "funclet"(token %18) ]
-  %23 = bitcast i8* %22 to i32*
-  %24 = load i32, i32* %23, align 4
-  call void @__cxa_end_catch() #1 [ "funclet"(token %18) ]
-  catchret from %18 to label %try.cont16
+  %19 = call ptr @__cxa_begin_catch(ptr %16) #1 [ "funclet"(token %15) ]
+  %20 = load i32, ptr %19, align 4
+  call void @__cxa_end_catch() #1 [ "funclet"(token %15) ]
+  catchret from %15 to label %try.cont16
 
 rethrow12:                                        ; preds = %catch.start10
-  call void @llvm.wasm.rethrow() #3 [ "funclet"(token %18) ]
+  call void @llvm.wasm.rethrow() #3 [ "funclet"(token %15) ]
   unreachable
 
 try.cont16:                                       ; preds = %try.cont8, %catch13
@@ -1657,35 +1642,35 @@ declare void @nothrow(i32) #0
 declare i32 @nothrow_i32() #0
 
 ; Function Attrs: nounwind
-declare %class.Object* @_ZN6ObjectD2Ev(%class.Object* returned) #0
-@_ZTI7MyClass = external constant { i8*, i8* }, align 4
+declare ptr @_ZN6ObjectD2Ev(ptr returned) #0
+@_ZTI7MyClass = external constant { ptr, ptr }, align 4
 ; Function Attrs: nounwind
-declare %class.MyClass* @_ZN7MyClassD2Ev(%class.MyClass* returned) #0
+declare ptr @_ZN7MyClassD2Ev(ptr returned) #0
 ; Function Attrs: nounwind
-declare %class.MyClass* @_ZN7MyClassC2ERKS_(%class.MyClass* returned, %class.MyClass* dereferenceable(4)) #0
+declare ptr @_ZN7MyClassC2ERKS_(ptr returned, ptr dereferenceable(4)) #0
 
 declare i32 @__gxx_wasm_personality_v0(...)
 ; Function Attrs: nounwind
-declare i8* @llvm.wasm.get.exception(token) #0
+declare ptr @llvm.wasm.get.exception(token) #0
 ; Function Attrs: nounwind
 declare i32 @llvm.wasm.get.ehselector(token) #0
-declare i8* @__cxa_allocate_exception(i32) #0
-declare void @__cxa_throw(i8*, i8*, i8*)
+declare ptr @__cxa_allocate_exception(i32) #0
+declare void @__cxa_throw(ptr, ptr, ptr)
 ; Function Attrs: noreturn
 declare void @llvm.wasm.rethrow() #1
 ; Function Attrs: nounwind
-declare i32 @llvm.eh.typeid.for(i8*) #0
+declare i32 @llvm.eh.typeid.for(ptr) #0
 
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 declare void @__cxa_end_catch()
-declare i8* @__cxa_get_exception_ptr(i8*)
+declare ptr @__cxa_get_exception_ptr(ptr)
 declare void @_ZSt9terminatev()
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i32, i1 immarg) #0
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) #0
 ; Function Attrs: nounwind
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1 immarg) #0
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1 immarg) #0
 ; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg) #0
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg) #0
 
 attributes #0 = { nounwind }
 attributes #1 = { noreturn }
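
The hunks above show the whole conversion in miniature: with opaque
pointers there is only one pointer type, ptr, so the bitcasts that used
to shuttle values between i8* and i32* (or between typed function
pointers) become no-ops and are dropped, and the unnamed value numbers
(%0, %1, ...) shift down accordingly. A minimal hand-written sketch of
the rewrite, with illustrative value names that are not taken from the
patch:

  ; Before, with typed pointers:
  %exn = call i8* @__cxa_allocate_exception(i32 4)
  %p = bitcast i8* %exn to i32*
  store i32 0, i32* %p, align 16

  ; After, with opaque pointers, the bitcast is pointless and disappears:
  %exn = call ptr @__cxa_allocate_exception(i32 4)
  store i32 0, ptr %exn, align 16

The same collapse applies to declarations: since pointee types no longer
exist, a signature like i8* @__cxa_begin_catch(i8*) is simply spelled
ptr @__cxa_begin_catch(ptr), with no change to the generated
WebAssembly (hence NFC).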

diff  --git a/llvm/test/CodeGen/WebAssembly/cfi.ll b/llvm/test/CodeGen/WebAssembly/cfi.ll
index 900ead320514..b658cc053bcb 100644
--- a/llvm/test/CodeGen/WebAssembly/cfi.ll
+++ b/llvm/test/CodeGen/WebAssembly/cfi.ll
@@ -5,7 +5,7 @@
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
-@0 = private unnamed_addr constant [2 x void (...)*] [void (...)* bitcast (void ()* @f to void (...)*), void (...)* bitcast (void ()* @g to void (...)*)], align 16
+@0 = private unnamed_addr constant [2 x ptr] [ptr @f, ptr @g], align 16
 
 ; CHECK-LABEL: h:
 ; CHECK-NOT: .indidx
@@ -28,15 +28,15 @@ define void @g() !type !1 {
 !0 = !{i32 0, !"typeid1"}
 !1 = !{i32 0, !"typeid2"}
 
-declare i1 @llvm.type.test(i8* %ptr, metadata %bitset) nounwind readnone
+declare i1 @llvm.type.test(ptr %ptr, metadata %bitset) nounwind readnone
 declare void @llvm.trap() nounwind noreturn
 
 ; CHECK-LABEL: foo:
 ; CHECK: br_if
 ; CHECK: br_if
 ; CHECK: unreachable
-define i1 @foo(i8* %p) {
-  %x = call i1 @llvm.type.test(i8* %p, metadata !"typeid1")
+define i1 @foo(ptr %p) {
+  %x = call i1 @llvm.type.test(ptr %p, metadata !"typeid1")
   br i1 %x, label %contx, label %trap
 
 trap:
@@ -44,7 +44,7 @@ trap:
   unreachable
 
 contx:
-  %y = call i1 @llvm.type.test(i8* %p, metadata !"typeid2")
+  %y = call i1 @llvm.type.test(ptr %p, metadata !"typeid2")
   br i1 %y, label %conty, label %trap
 
 conty:

diff  --git a/llvm/test/CodeGen/WebAssembly/clear-cache.ll b/llvm/test/CodeGen/WebAssembly/clear-cache.ll
index 0fc379cb8ac9..e9a8743446f5 100644
--- a/llvm/test/CodeGen/WebAssembly/clear-cache.ll
+++ b/llvm/test/CodeGen/WebAssembly/clear-cache.ll
@@ -3,10 +3,10 @@
 target triple = "wasm32-unknown-unknown"
 
 ; CHECK: LLVM ERROR: llvm.clear_cache is not supported on wasm
-define void @clear_cache(i8* %begin, i8* %end) {
+define void @clear_cache(ptr %begin, ptr %end) {
 entry:
-  call void @llvm.clear_cache(i8* %begin, i8* %end)
+  call void @llvm.clear_cache(ptr %begin, ptr %end)
   ret void
 }
 
-declare void @llvm.clear_cache(i8*, i8*)
+declare void @llvm.clear_cache(ptr, ptr)

diff  --git a/llvm/test/CodeGen/WebAssembly/dead-vreg.ll b/llvm/test/CodeGen/WebAssembly/dead-vreg.ll
index 61023dcf1275..bd4a4b40ee67 100644
--- a/llvm/test/CodeGen/WebAssembly/dead-vreg.ll
+++ b/llvm/test/CodeGen/WebAssembly/dead-vreg.ll
@@ -4,7 +4,7 @@
 
 target triple = "wasm32-unknown-unknown"
 
-define void @foo(i32* nocapture %a, i32 %w, i32 %h) {
+define void @foo(ptr nocapture %a, i32 %w, i32 %h) {
 ; CHECK-LABEL: foo:
 ; CHECK-NEXT: .functype foo (i32, i32, i32) -> (){{$}}
 ; CHECK-NEXT: .local i32, i32, i32, i32, i32, i32{{$}}
@@ -28,8 +28,8 @@ for.body.3:
   %x.018 = phi i32 [ 0, %for.body.3.lr.ph ], [ %inc, %for.body.3 ]
   %mul = mul nsw i32 %x.018, %y.020
   %add = add nsw i32 %x.018, %mul4
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %add
-  store i32 %mul, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %add
+  store i32 %mul, ptr %arrayidx, align 4
   %inc = add nuw nsw i32 %x.018, 1
   %exitcond = icmp eq i32 %inc, %w
   br i1 %exitcond, label %for.inc.5.loopexit, label %for.body.3

diff  --git a/llvm/test/CodeGen/WebAssembly/eh-lsda.ll b/llvm/test/CodeGen/WebAssembly/eh-lsda.ll
index 78bfe5be2d96..ce55f7620e14 100644
--- a/llvm/test/CodeGen/WebAssembly/eh-lsda.ll
+++ b/llvm/test/CodeGen/WebAssembly/eh-lsda.ll
@@ -5,9 +5,9 @@
 ; RUN: llc < %s --mtriple=wasm32-unknown-emscripten -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-eh -exception-model=wasm -mattr=+exception-handling -relocation-model=pic | FileCheck %s -check-prefixes=CHECK,PIC -DPTR=32
 ; RUN: llc < %s --mtriple=wasm64-unknown-emscripten -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-eh -exception-model=wasm -mattr=+exception-handling -relocation-model=pic | FileCheck %s -check-prefixes=CHECK,PIC -DPTR=64
 
-@_ZTIi = external constant i8*
-@_ZTIf = external constant i8*
-@_ZTId = external constant i8*
+@_ZTIi = external constant ptr
+@_ZTIf = external constant ptr
+@_ZTId = external constant ptr
 
 ; Single catch (...) does not need an exception table.
 ;
@@ -17,7 +17,7 @@
 ; }
 ; CHECK-LABEL: test0:
 ; CHECK-NOT: GCC_except_table
-define void @test0() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test0() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @may_throw()
           to label %try.cont unwind label %catch.dispatch
@@ -26,10 +26,10 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %4 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont
 
@@ -128,7 +128,7 @@ try.cont:                                         ; preds = %entry, %catch.start
 ; CHECK-NEXT:   .p2align  2
 ; CHECK-NEXT: .LGCC_except_table_end[[END:[0-9]+]]:
 ; CHECK-NEXT:   .size  GCC_except_table[[START]], .LGCC_except_table_end[[END]]-GCC_except_table[[START]]
-define void @test1() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test1() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @may_throw()
           to label %try.cont unwind label %catch.dispatch
@@ -137,17 +137,16 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* bitcast (i8** @_ZTIi to i8*), i8* bitcast (i8** @_ZTIf to i8*), i8* bitcast (i8** @_ZTId to i8*), i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr @_ZTIi, ptr @_ZTIf, ptr @_ZTId, ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+  %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
   %matches = icmp eq i32 %3, %4
   br i1 %matches, label %catch10, label %catch.fallthrough
 
 catch10:                                          ; preds = %catch.start
-  %5 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
-  %6 = bitcast i8* %5 to i32*
-  %7 = load i32, i32* %6, align 4
+  %5 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
+  %6 = load i32, ptr %5, align 4
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont
 
@@ -156,89 +155,84 @@ try.cont:                                         ; preds = %entry, %catch, %cat
           to label %try.cont23 unwind label %catch.dispatch14
 
 catch.dispatch14:                                 ; preds = %try.cont
-  %8 = catchswitch within none [label %catch.start15] unwind to caller
+  %7 = catchswitch within none [label %catch.start15] unwind to caller
 
 catch.start15:                                    ; preds = %catch.dispatch14
-  %9 = catchpad within %8 [i8* bitcast (i8** @_ZTId to i8*), i8* null]
-  %10 = call i8* @llvm.wasm.get.exception(token %9)
-  %11 = call i32 @llvm.wasm.get.ehselector(token %9)
-  %12 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTId to i8*))
-  %matches16 = icmp eq i32 %11, %12
-  %13 = call i8* @__cxa_begin_catch(i8* %10) [ "funclet"(token %9) ]
+  %8 = catchpad within %7 [ptr @_ZTId, ptr null]
+  %9 = call ptr @llvm.wasm.get.exception(token %8)
+  %10 = call i32 @llvm.wasm.get.ehselector(token %8)
+  %11 = call i32 @llvm.eh.typeid.for(ptr @_ZTId)
+  %matches16 = icmp eq i32 %10, %11
+  %12 = call ptr @__cxa_begin_catch(ptr %9) [ "funclet"(token %8) ]
   br i1 %matches16, label %catch20, label %catch17
 
 catch20:                                          ; preds = %catch.start15
-  %14 = bitcast i8* %13 to double*
-  %15 = load double, double* %14, align 8
-  call void @__cxa_end_catch() [ "funclet"(token %9) ]
-  catchret from %9 to label %try.cont23
+  %13 = load double, ptr %12, align 8
+  call void @__cxa_end_catch() [ "funclet"(token %8) ]
+  catchret from %8 to label %try.cont23
 
 try.cont23:                                       ; preds = %try.cont, %catch17, %catch20
   invoke void @may_throw()
           to label %try.cont36 unwind label %catch.dispatch25
 
 catch.dispatch25:                                 ; preds = %try.cont23
-  %16 = catchswitch within none [label %catch.start26] unwind to caller
+  %14 = catchswitch within none [label %catch.start26] unwind to caller
 
 catch.start26:                                    ; preds = %catch.dispatch25
-  %17 = catchpad within %16 [i8* bitcast (i8** @_ZTIi to i8*), i8* bitcast (i8** @_ZTIf to i8*)]
-  %18 = call i8* @llvm.wasm.get.exception(token %17)
-  %19 = call i32 @llvm.wasm.get.ehselector(token %17)
-  %20 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
-  %matches27 = icmp eq i32 %19, %20
+  %15 = catchpad within %14 [ptr @_ZTIi, ptr @_ZTIf]
+  %16 = call ptr @llvm.wasm.get.exception(token %15)
+  %17 = call i32 @llvm.wasm.get.ehselector(token %15)
+  %18 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
+  %matches27 = icmp eq i32 %17, %18
   br i1 %matches27, label %catch33, label %catch.fallthrough28
 
 catch33:                                          ; preds = %catch.start26
-  %21 = call i8* @__cxa_begin_catch(i8* %18) [ "funclet"(token %17) ]
-  %22 = bitcast i8* %21 to i32*
-  %23 = load i32, i32* %22, align 4
-  call void @__cxa_end_catch() [ "funclet"(token %17) ]
-  catchret from %17 to label %try.cont36
+  %19 = call ptr @__cxa_begin_catch(ptr %16) [ "funclet"(token %15) ]
+  %20 = load i32, ptr %19, align 4
+  call void @__cxa_end_catch() [ "funclet"(token %15) ]
+  catchret from %15 to label %try.cont36
 
 catch.fallthrough28:                              ; preds = %catch.start26
-  %24 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIf to i8*))
-  %matches29 = icmp eq i32 %19, %24
+  %21 = call i32 @llvm.eh.typeid.for(ptr @_ZTIf)
+  %matches29 = icmp eq i32 %17, %21
   br i1 %matches29, label %catch30, label %rethrow
 
 catch30:                                          ; preds = %catch.fallthrough28
-  %25 = call i8* @__cxa_begin_catch(i8* %18) [ "funclet"(token %17) ]
-  %26 = bitcast i8* %25 to float*
-  %27 = load float, float* %26, align 4
-  call void @__cxa_end_catch() [ "funclet"(token %17) ]
-  catchret from %17 to label %try.cont36
+  %22 = call ptr @__cxa_begin_catch(ptr %16) [ "funclet"(token %15) ]
+  %23 = load float, ptr %22, align 4
+  call void @__cxa_end_catch() [ "funclet"(token %15) ]
+  catchret from %15 to label %try.cont36
 
 rethrow:                                          ; preds = %catch.fallthrough28
-  call void @__cxa_rethrow() [ "funclet"(token %17) ]
+  call void @__cxa_rethrow() [ "funclet"(token %15) ]
   unreachable
 
 try.cont36:                                       ; preds = %try.cont23, %catch30, %catch33
   ret void
 
 catch17:                                          ; preds = %catch.start15
-  call void @__cxa_end_catch() [ "funclet"(token %9) ]
-  catchret from %9 to label %try.cont23
+  call void @__cxa_end_catch() [ "funclet"(token %8) ]
+  catchret from %8 to label %try.cont23
 
 catch.fallthrough:                                ; preds = %catch.start
-  %28 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIf to i8*))
-  %matches1 = icmp eq i32 %3, %28
+  %24 = call i32 @llvm.eh.typeid.for(ptr @_ZTIf)
+  %matches1 = icmp eq i32 %3, %24
   br i1 %matches1, label %catch7, label %catch.fallthrough2
 
 catch7:                                           ; preds = %catch.fallthrough
-  %29 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
-  %30 = bitcast i8* %29 to float*
-  %31 = load float, float* %30, align 4
+  %25 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
+  %26 = load float, ptr %25, align 4
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont
 
 catch.fallthrough2:                               ; preds = %catch.fallthrough
-  %32 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTId to i8*))
-  %matches3 = icmp eq i32 %3, %32
-  %33 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %27 = call i32 @llvm.eh.typeid.for(ptr @_ZTId)
+  %matches3 = icmp eq i32 %3, %27
+  %28 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   br i1 %matches3, label %catch4, label %catch
 
 catch4:                                           ; preds = %catch.fallthrough2
-  %34 = bitcast i8* %33 to double*
-  %35 = load double, double* %34, align 8
+  %29 = load double, ptr %28, align 8
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont
 
@@ -249,13 +243,13 @@ catch:                                            ; preds = %catch.fallthrough2
 
 declare void @may_throw()
 ; Function Attrs: nounwind
-declare i32 @llvm.eh.typeid.for(i8*) #0
+declare i32 @llvm.eh.typeid.for(ptr) #0
 ; Function Attrs: nounwind
-declare i8* @llvm.wasm.get.exception(token) #0
+declare ptr @llvm.wasm.get.exception(token) #0
 ; Function Attrs: nounwind
 declare i32 @llvm.wasm.get.ehselector(token) #0
 declare void @__cxa_rethrow()
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 declare void @__cxa_end_catch()
 declare i32 @__gxx_wasm_personality_v0(...)
 

diff  --git a/llvm/test/CodeGen/WebAssembly/exception.ll b/llvm/test/CodeGen/WebAssembly/exception.ll
index 596872ce2d62..dfa33f95f37b 100644
--- a/llvm/test/CodeGen/WebAssembly/exception.ll
+++ b/llvm/test/CodeGen/WebAssembly/exception.ll
@@ -6,15 +6,15 @@ target triple = "wasm32-unknown-unknown"
 
 %struct.Temp = type { i8 }
 
-@_ZTIi = external dso_local constant i8*
+@_ZTIi = external dso_local constant ptr
 
 ; CHECK: .tagtype  __cpp_exception i32
 
 ; CHECK-LABEL: test_throw:
 ; CHECK:     throw __cpp_exception, $0
 ; CHECK-NOT: unreachable
-define void @test_throw(i8* %p) {
-  call void @llvm.wasm.throw(i32 0, i8* %p)
+define void @test_throw(ptr %p) {
+  call void @llvm.wasm.throw(i32 0, ptr %p)
   ret void
 }
 
@@ -44,7 +44,7 @@ define void @test_throw(i8* %p) {
 ; CHECK:       end_block
 ; CHECK:       rethrow   0
 ; CHECK:     end_try
-define void @test_catch() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test_catch() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @foo()
           to label %try.cont unwind label %catch.dispatch
@@ -53,15 +53,15 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr @_ZTIi]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+  %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
   %matches = icmp eq i32 %3, %4
   br i1 %matches, label %catch, label %rethrow
 
 catch:                                            ; preds = %catch.start
-  %5 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %5 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont
 
@@ -92,19 +92,19 @@ try.cont:                                         ; preds = %catch, %entry
 ; CHECK:   call      $drop=, _ZN4TempD2Ev
 ; CHECK:   rethrow   0
 ; CHECK: end_try
-define void @test_cleanup() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test_cleanup() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   %t = alloca %struct.Temp, align 1
   invoke void @foo()
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  %call = call %struct.Temp* @_ZN4TempD2Ev(%struct.Temp* %t)
+  %call = call ptr @_ZN4TempD2Ev(ptr %t)
   ret void
 
 ehcleanup:                                        ; preds = %entry
   %0 = cleanuppad within none []
-  %call1 = call %struct.Temp* @_ZN4TempD2Ev(%struct.Temp* %t) [ "funclet"(token %0) ]
+  %call1 = call ptr @_ZN4TempD2Ev(ptr %t) [ "funclet"(token %0) ]
   cleanupret from %0 unwind to caller
 }
 
@@ -138,7 +138,7 @@ ehcleanup:                                        ; preds = %entry
 ; CHECK:   end_try
 ; CHECK:   call      __cxa_end_catch
 ; CHECK: end_try
-define void @test_terminatepad() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test_terminatepad() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @foo()
           to label %try.cont unwind label %catch.dispatch
@@ -147,10 +147,10 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %4 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   invoke void @foo() [ "funclet"(token %1) ]
           to label %invoke.cont1 unwind label %ehcleanup
 
@@ -217,10 +217,10 @@ terminate:                                        ; preds = %ehcleanup
 ; CHECK-NOT:   global.set  __stack_pointer, $pop{{.+}}
 ; CHECK:       call      __cxa_end_catch
 ; CHECK:     end_try
-define void @test_no_prolog_epilog_in_ehpad() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test_no_prolog_epilog_in_ehpad() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   %stack_var = alloca i32, align 4
-  call void @bar(i32* %stack_var)
+  call void @bar(ptr %stack_var)
   invoke void @foo()
           to label %try.cont unwind label %catch.dispatch
 
@@ -228,17 +228,16 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr @_ZTIi]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+  %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
   %matches = icmp eq i32 %3, %4
   br i1 %matches, label %catch, label %rethrow
 
 catch:                                            ; preds = %catch.start
-  %5 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
-  %6 = bitcast i8* %5 to float*
-  %7 = load float, float* %6, align 4
+  %5 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
+  %6 = load float, ptr %5, align 4
   invoke void @foo() [ "funclet"(token %1) ]
           to label %invoke.cont1 unwind label %ehcleanup
 
@@ -254,9 +253,9 @@ try.cont:                                         ; preds = %invoke.cont1, %entr
   ret void
 
 ehcleanup:                                        ; preds = %catch
-  %8 = cleanuppad within %1 []
-  call void @__cxa_end_catch() [ "funclet"(token %8) ]
-  cleanupret from %8 unwind to caller
+  %7 = cleanuppad within %1 []
+  call void @__cxa_end_catch() [ "funclet"(token %7) ]
+  cleanupret from %7 unwind to caller
 }
 
 ; When a function does not have stack-allocated objects, it does not need to
@@ -279,7 +278,7 @@ ehcleanup:                                        ; preds = %catch
 ; CHECK:     end_try
 ; CHECK-NOT: global.set  __stack_pointer
 ; CHECK:     return
-define void @test_no_sp_writeback() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test_no_sp_writeback() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @foo()
           to label %try.cont unwind label %catch.dispatch
@@ -288,10 +287,10 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+  %4 = call ptr @__cxa_begin_catch(ptr %2) [ "funclet"(token %1) ]
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont
 
@@ -301,7 +300,7 @@ try.cont:                                         ; preds = %catch.start, %entry
 
 ; When the result of @llvm.wasm.get.exception is not used. This is created to
 ; fix a bug in LateEHPrepare and this should not crash.
-define void @test_get_exception_wo_use() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test_get_exception_wo_use() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @foo()
           to label %try.cont unwind label %catch.dispatch
@@ -310,8 +309,8 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
   catchret from %1 to label %try.cont
 
@@ -321,7 +320,7 @@ try.cont:                                         ; preds = %catch.start, %entry
 
 ; Tests a case when a cleanup region (cleanuppad ~ cleanupret) contains another
 ; catchpad
-define void @test_complex_cleanup_region() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @test_complex_cleanup_region() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   invoke void @foo()
           to label %invoke.cont unwind label %ehcleanup
@@ -338,8 +337,8 @@ catch.dispatch:                                   ; preds = %ehcleanup
   %1 = catchswitch within %0 [label %catch.start] unwind label %ehcleanup.1
 
 catch.start:                                      ; preds = %catch.dispatch
-  %2 = catchpad within %1 [i8* null]
-  %3 = call i8* @llvm.wasm.get.exception(token %2)
+  %2 = catchpad within %1 [ptr null]
+  %3 = call ptr @llvm.wasm.get.exception(token %2)
   %4 = call i32 @llvm.wasm.get.ehselector(token %2)
   catchret from %2 to label %ehcleanupret
 
@@ -352,22 +351,22 @@ ehcleanupret:                                     ; preds = %catch.start, %ehcle
 }
 
 declare void @foo()
-declare void @bar(i32*)
+declare void @bar(ptr)
 declare i32 @__gxx_wasm_personality_v0(...)
 ; Function Attrs: noreturn
-declare void @llvm.wasm.throw(i32, i8*) #1
+declare void @llvm.wasm.throw(i32, ptr) #1
 ; Function Attrs: nounwind
-declare i8* @llvm.wasm.get.exception(token) #0
+declare ptr @llvm.wasm.get.exception(token) #0
 ; Function Attrs: nounwind
 declare i32 @llvm.wasm.get.ehselector(token) #0
 ; Function Attrs: noreturn
 declare void @llvm.wasm.rethrow() #1
 ; Function Attrs: nounwind
-declare i32 @llvm.eh.typeid.for(i8*) #0
-declare i8* @__cxa_begin_catch(i8*)
+declare i32 @llvm.eh.typeid.for(ptr) #0
+declare ptr @__cxa_begin_catch(ptr)
 declare void @__cxa_end_catch()
 declare void @_ZSt9terminatev()
-declare %struct.Temp* @_ZN4TempD2Ev(%struct.Temp* returned)
+declare ptr @_ZN4TempD2Ev(ptr returned)
 
 attributes #0 = { nounwind }
 attributes #1 = { noreturn }

diff  --git a/llvm/test/CodeGen/WebAssembly/fast-isel-call-indirect64.ll b/llvm/test/CodeGen/WebAssembly/fast-isel-call-indirect64.ll
index adbf3aa94dc0..8224c3bc4e37 100644
--- a/llvm/test/CodeGen/WebAssembly/fast-isel-call-indirect64.ll
+++ b/llvm/test/CodeGen/WebAssembly/fast-isel-call-indirect64.ll
@@ -8,7 +8,7 @@ target triple = "wasm64"
 ; CHECK-NEXT:  i32.wrap_i64 $push[[L1:[0-9]+]]=, $pop[[L0]]
 ; CHECK-NEXT:  call_indirect $pop[[L1]]
 
-define hidden void @f(void ()* %g) {
+define hidden void @f(ptr %g) {
   call void %g()
   ret void
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/fast-isel-noreg.ll b/llvm/test/CodeGen/WebAssembly/fast-isel-noreg.ll
index 919ac6815717..56e599a7132e 100644
--- a/llvm/test/CodeGen/WebAssembly/fast-isel-noreg.ll
+++ b/llvm/test/CodeGen/WebAssembly/fast-isel-noreg.ll
@@ -8,14 +8,14 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK: i32.const $push0=, 0
 define hidden i32 @a() #0 {
 entry:
-  ret i32 zext (i1 icmp eq (void (...)* inttoptr (i32 10 to void (...)*), void (...)* null) to i32)
+  ret i32 zext (i1 icmp eq (ptr inttoptr (i32 10 to ptr), ptr null) to i32)
 }
 
 ; CHECK: i32.const $push0=, 1
 ; CHECK: br_if 0, $pop0
 define hidden i32 @b() #0 {
 entry:
-  br i1 icmp eq (void (...)* inttoptr (i32 10 to void (...)*), void (...)* null), label %a, label %b
+  br i1 icmp eq (ptr inttoptr (i32 10 to ptr), ptr null), label %a, label %b
 a:
   unreachable
 b:
@@ -27,7 +27,7 @@ b:
 ; CHECK: i32.store 0($pop1), $pop2
 define hidden i32 @c() #0 {
 entry:
-  store i32 zext (i1 icmp eq (void (...)* inttoptr (i32 10 to void (...)*), void (...)* null) to i32), i32* inttoptr (i32 0 to i32 *)
+  store i32 zext (i1 icmp eq (ptr inttoptr (i32 10 to ptr), ptr null) to i32), ptr inttoptr (i32 0 to ptr)
   ret i32 0
 }
 
@@ -44,7 +44,7 @@ entry:
 ; CHECK: br_if 0, $pop{{[0-9]+}}
 define hidden i32 @d() #0 {
 entry:
-  %t = icmp slt i8 ptrtoint (void ()* @addr to i8), 64
+  %t = icmp slt i8 ptrtoint (ptr @addr to i8), 64
   br i1 %t, label %a, label %b
 a:
   unreachable
@@ -63,7 +63,7 @@ b:
 ; CHECK: br_if 0, $pop{{[0-9]+}}
 define hidden i32 @e() #0 {
 entry:
-  %t = icmp ult i8 ptrtoint (void ()* @addr to i8), 64
+  %t = icmp ult i8 ptrtoint (ptr @addr to i8), 64
   br i1 %t, label %a, label %b
 a:
   unreachable
@@ -78,7 +78,7 @@ b:
 ; CHECK: i32.shr_s
 define hidden i32 @f() #0 {
 entry:
-  %t = sext i8 ptrtoint (void ()* @addr to i8) to i32
+  %t = sext i8 ptrtoint (ptr @addr to i8) to i32
   ret i32 %t
 }
 
@@ -87,7 +87,7 @@ entry:
 ; CHECK: i32.and
 define hidden i32 @g() #0 {
 entry:
-  %t = zext i8 ptrtoint (void ()* @addr to i8) to i32
+  %t = zext i8 ptrtoint (ptr @addr to i8) to i32
   ret i32 %t
 }
 

diff  --git a/llvm/test/CodeGen/WebAssembly/fast-isel-pr47040.ll b/llvm/test/CodeGen/WebAssembly/fast-isel-pr47040.ll
index 61dfeecb00d1..6a1304cb9a93 100644
--- a/llvm/test/CodeGen/WebAssembly/fast-isel-pr47040.ll
+++ b/llvm/test/CodeGen/WebAssembly/fast-isel-pr47040.ll
@@ -13,9 +13,9 @@ target triple = "wasm32-unknown-unknown"
 
 define i32 @foo() {
   %stack_addr = alloca i32
-  %stack_i = ptrtoint i32* %stack_addr to i32
+  %stack_i = ptrtoint ptr %stack_addr to i32
   %added = add i32 %stack_i, undef
-  %added_addr = inttoptr i32 %added to i32*
-  %ret = load i32, i32* %added_addr
+  %added_addr = inttoptr i32 %added to ptr
+  %ret = load i32, ptr %added_addr
   ret i32 %ret
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/fast-isel.ll b/llvm/test/CodeGen/WebAssembly/fast-isel.ll
index cd134b100e05..8f1761b3eb66 100644
--- a/llvm/test/CodeGen/WebAssembly/fast-isel.ll
+++ b/llvm/test/CodeGen/WebAssembly/fast-isel.ll
@@ -50,29 +50,29 @@ define double @bitcast_f64_i64(i64 %x) {
 ; Do fold offsets into geps.
 ; CHECK-LABEL: do_fold_offset_into_gep:
 ; CHECK: i64.load $push{{[0-9]+}}=, 8($0)
-define i64 @do_fold_offset_into_gep(i64* %p) {
+define i64 @do_fold_offset_into_gep(ptr %p) {
 bb:
-  %tmp = getelementptr inbounds i64, i64* %p, i32 1
-  %tmp2 = load i64, i64* %tmp, align 8
+  %tmp = getelementptr inbounds i64, ptr %p, i32 1
+  %tmp2 = load i64, ptr %tmp, align 8
   ret i64 %tmp2
 }
 
 ; Don't fold negative offsets into geps.
 ; CHECK-LABEL: dont_fold_negative_offset:
 ; CHECK: i64.load $push{{[0-9]+}}=, 0($pop{{[0-9]+}})
-define i64 @dont_fold_negative_offset(i64* %p) {
+define i64 @dont_fold_negative_offset(ptr %p) {
 bb:
-  %tmp = getelementptr inbounds i64, i64* %p, i32 -1
-  %tmp2 = load i64, i64* %tmp, align 8
+  %tmp = getelementptr inbounds i64, ptr %p, i32 -1
+  %tmp2 = load i64, ptr %tmp, align 8
   ret i64 %tmp2
 }
 
 ; Don't fold non-inbounds geps.
 ; CHECK-LABEL: dont_fold_non_inbounds_gep:
 ; CHECK: i64.load $push{{[0-9]+}}=, 0($pop{{[0-9]+}})
-define i64 @dont_fold_non_inbounds_gep(i64* %p) {
+define i64 @dont_fold_non_inbounds_gep(ptr %p) {
 bb:
-  %tmp = getelementptr i64, i64* %p, i32 1
-  %tmp2 = load i64, i64* %tmp, align 8
+  %tmp = getelementptr i64, ptr %p, i32 1
+  %tmp2 = load i64, ptr %tmp, align 8
   ret i64 %tmp2
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/function-addr-offset.ll b/llvm/test/CodeGen/WebAssembly/function-addr-offset.ll
index f7c9285ab4ce..f4baa2f84e9b 100644
--- a/llvm/test/CodeGen/WebAssembly/function-addr-offset.ll
+++ b/llvm/test/CodeGen/WebAssembly/function-addr-offset.ll
@@ -7,9 +7,9 @@
 target triple = "wasm32-unknown-unknown"
 
 ; 'hidden' here should be present to reproduce the bug
-declare hidden void @ham(i8*)
+declare hidden void @ham(ptr)
 
-define void @bar(i8* %ptr) {
+define void @bar(ptr %ptr) {
 bb1:
   br i1 undef, label %bb3, label %bb2
 
@@ -21,7 +21,7 @@ bb2:
   ; CHECK:      i32.const  ham
   ; CHECK-NEXT: i32.const  1
   ; CHECK-NEXT: i32.add
-  switch i32 ptrtoint (void (i8*)* @ham to i32), label %bb4 [
+  switch i32 ptrtoint (ptr @ham to i32), label %bb4 [
     i32 -1, label %bb3
     i32 0, label %bb3
   ]
@@ -30,6 +30,6 @@ bb3:
   unreachable
 
 bb4:
-  %tmp = load i8, i8* %ptr
+  %tmp = load i8, ptr %ptr
   unreachable
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/function-bitcasts-varargs.ll b/llvm/test/CodeGen/WebAssembly/function-bitcasts-varargs.ll
index 235078acf5b2..473dcdd0c0e5 100644
--- a/llvm/test/CodeGen/WebAssembly/function-bitcasts-varargs.ll
+++ b/llvm/test/CodeGen/WebAssembly/function-bitcasts-varargs.ll
@@ -7,8 +7,8 @@ target triple = "wasm32-unknown-unknown"
 
 define void @callWithArgs() {
 entry:
-  call void bitcast (void (...)* @underspecified to void (i32, i32)*)(i32 0, i32 1)
-  call void(...) bitcast (void (i32, i32)* @specified to void (...)*)(i32 0, i32 1)
+  call void @underspecified(i32 0, i32 1)
+  call void(...) @specified(i32 0, i32 1)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/WebAssembly/function-pointer64.ll b/llvm/test/CodeGen/WebAssembly/function-pointer64.ll
index f501ed97930d..c7c90f6b7ac2 100644
--- a/llvm/test/CodeGen/WebAssembly/function-pointer64.ll
+++ b/llvm/test/CodeGen/WebAssembly/function-pointer64.ll
@@ -11,7 +11,7 @@ entry:
   ret void
 }
 
-define void @foo(void (i32)* %fp) {
+define void @foo(ptr %fp) {
 entry:
   call void %fp(i32 1)
   ret void
@@ -19,12 +19,12 @@ entry:
 
 define void @test() {
 entry:
-  call void @foo(void (i32)* @bar)
-  store void (i32)* @bar, void (i32)** @fptr
+  call void @foo(ptr @bar)
+  store ptr @bar, ptr @fptr
   ret void
 }
 
-@fptr = global void (i32)* @bar
+@fptr = global ptr @bar
 
 ; For simplicity (and compatibility with UB C/C++ code) we keep all types
 ; of pointers the same size, so function pointers (which are 32-bit indices

diff  --git a/llvm/test/CodeGen/WebAssembly/global-get.ll b/llvm/test/CodeGen/WebAssembly/global-get.ll
index 07477e690c68..9d22e3ab1b2b 100644
--- a/llvm/test/CodeGen/WebAssembly/global-get.ll
+++ b/llvm/test/CodeGen/WebAssembly/global-get.ll
@@ -13,7 +13,7 @@ define i32 @return_i32_global() {
 ; CHECK-NEXT: functype       return_i32_global () -> (i32)
 ; CHECK-NEXT: global.get i32_global
 ; CHECK-NEXT: end_function
-  %v = load i32, i32 addrspace(1)* @i32_global
+  %v = load i32, ptr addrspace(1) @i32_global
   ret i32 %v
 }
 
@@ -22,7 +22,7 @@ define i64 @return_i64_global() {
 ; CHECK-NEXT: functype       return_i64_global () -> (i64)
 ; CHECK-NEXT: global.get i64_global
 ; CHECK-NEXT: end_function
-  %v = load i64, i64 addrspace(1)* @i64_global
+  %v = load i64, ptr addrspace(1) @i64_global
   ret i64 %v
 }
 
@@ -31,7 +31,7 @@ define float @return_f32_global() {
 ; CHECK-NEXT: functype       return_f32_global () -> (f32)
 ; CHECK-NEXT: global.get f32_global
 ; CHECK-NEXT: end_function
-  %v = load float, float addrspace(1)* @f32_global
+  %v = load float, ptr addrspace(1) @f32_global
   ret float %v
 }
 
@@ -40,7 +40,7 @@ define double @return_f64_global() {
 ; CHECK-NEXT: functype       return_f64_global () -> (f64)
 ; CHECK-NEXT: global.get f64_global
 ; CHECK-NEXT: end_function
-  %v = load double, double addrspace(1)* @f64_global
+  %v = load double, ptr addrspace(1) @f64_global
   ret double %v
 }
 
@@ -49,7 +49,7 @@ define i32 @return_extern_i32_global() {
 ; CHECK-NEXT: functype       return_extern_i32_global () -> (i32)
 ; CHECK-NEXT: global.get i32_external_used
 ; CHECK-NEXT: end_function
-  %v = load i32, i32 addrspace(1)* @i32_external_used
+  %v = load i32, ptr addrspace(1) @i32_external_used
   ret i32 %v
 }
 

diff  --git a/llvm/test/CodeGen/WebAssembly/global-set.ll b/llvm/test/CodeGen/WebAssembly/global-set.ll
index 44a9acebe981..7db374528fe9 100644
--- a/llvm/test/CodeGen/WebAssembly/global-set.ll
+++ b/llvm/test/CodeGen/WebAssembly/global-set.ll
@@ -11,7 +11,7 @@ define void @set_i32_global(i32 %v) {
 ; CHECK-NEXT: local.get 0
 ; CHECK-NEXT: global.set i32_global
 ; CHECK-NEXT: end_function
-  store i32 %v, i32 addrspace(1)* @i32_global
+  store i32 %v, ptr addrspace(1) @i32_global
   ret void
 }
 
@@ -21,7 +21,7 @@ define void @set_i64_global(i64 %v) {
 ; CHECK-NEXT: local.get 0
 ; CHECK-NEXT: global.set i64_global
 ; CHECK-NEXT: end_function
-  store i64 %v, i64 addrspace(1)* @i64_global
+  store i64 %v, ptr addrspace(1) @i64_global
   ret void
 }
 
@@ -31,7 +31,7 @@ define void @set_f32_global(float %v) {
 ; CHECK-NEXT: local.get 0
 ; CHECK-NEXT: global.set f32_global
 ; CHECK-NEXT: end_function
-  store float %v, float addrspace(1)* @f32_global
+  store float %v, ptr addrspace(1) @f32_global
   ret void
 }
 
@@ -41,7 +41,7 @@ define void @set_f64_global(double %v) {
 ; CHECK-NEXT: local.get 0
 ; CHECK-NEXT: global.set f64_global
 ; CHECK-NEXT: end_function
-  store double %v, double addrspace(1)* @f64_global
+  store double %v, ptr addrspace(1) @f64_global
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/WebAssembly/global_dtors.ll b/llvm/test/CodeGen/WebAssembly/global_dtors.ll
index 3abe80deb569..be3b7ccf777f 100644
--- a/llvm/test/CodeGen/WebAssembly/global_dtors.ll
+++ b/llvm/test/CodeGen/WebAssembly/global_dtors.ll
@@ -5,4 +5,4 @@ target triple = "wasm32-unknown-unknown"
 ; Check that we do not crash when attempting to lower away
 ; global_dtors without a definition.
 
-@llvm.global_dtors = external global [2 x { i32, void ()*, i8* }]
+@llvm.global_dtors = external global [2 x { i32, ptr, ptr }]

diff  --git a/llvm/test/CodeGen/WebAssembly/globl.ll b/llvm/test/CodeGen/WebAssembly/globl.ll
index 6762f11bb344..0bfa3712deb4 100644
--- a/llvm/test/CodeGen/WebAssembly/globl.ll
+++ b/llvm/test/CodeGen/WebAssembly/globl.ll
@@ -6,8 +6,8 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK: .type foo, at function
 ; CHECK-LABEL: foo:
 ; CHECK: end_function
-define i32* @foo() {
-  ret i32* @bar
+define ptr @foo() {
+  ret ptr @bar
 }
 
 ; CHECK: .type bar, at object

diff  --git a/llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll b/llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
index 082b1083ee55..02801c18d794 100644
--- a/llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
+++ b/llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
@@ -12,8 +12,8 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-NEXT: .functype ldi32_a1 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i32 @ldi32_a1(i32 *%p) {
-  %v = load i32, i32* %p, align 1
+define i32 @ldi32_a1(ptr %p) {
+  %v = load i32, ptr %p, align 1
   ret i32 %v
 }
 
@@ -21,8 +21,8 @@ define i32 @ldi32_a1(i32 *%p) {
 ; CHECK-NEXT: .functype ldi32_a2 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($0):p2align=1{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i32 @ldi32_a2(i32 *%p) {
-  %v = load i32, i32* %p, align 2
+define i32 @ldi32_a2(ptr %p) {
+  %v = load i32, ptr %p, align 2
   ret i32 %v
 }
 
@@ -32,8 +32,8 @@ define i32 @ldi32_a2(i32 *%p) {
 ; CHECK-NEXT: .functype ldi32_a4 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i32 @ldi32_a4(i32 *%p) {
-  %v = load i32, i32* %p, align 4
+define i32 @ldi32_a4(ptr %p) {
+  %v = load i32, ptr %p, align 4
   ret i32 %v
 }
 
@@ -43,8 +43,8 @@ define i32 @ldi32_a4(i32 *%p) {
 ; CHECK-NEXT: .functype ldi32 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i32 @ldi32(i32 *%p) {
-  %v = load i32, i32* %p
+define i32 @ldi32(ptr %p) {
+  %v = load i32, ptr %p
   ret i32 %v
 }
 
@@ -54,8 +54,8 @@ define i32 @ldi32(i32 *%p) {
 ; CHECK-NEXT: .functype ldi32_a8 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i32 @ldi32_a8(i32 *%p) {
-  %v = load i32, i32* %p, align 8
+define i32 @ldi32_a8(ptr %p) {
+  %v = load i32, ptr %p, align 8
   ret i32 %v
 }
 
@@ -67,8 +67,8 @@ define i32 @ldi32_a8(i32 *%p) {
 ; CHECK-NEXT: .functype ldi8_a1 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i8 @ldi8_a1(i8 *%p) {
-  %v = load i8, i8* %p, align 1
+define i8 @ldi8_a1(ptr %p) {
+  %v = load i8, ptr %p, align 1
   ret i8 %v
 }
 
@@ -76,8 +76,8 @@ define i8 @ldi8_a1(i8 *%p) {
 ; CHECK-NEXT: .functype ldi8_a2 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i8 @ldi8_a2(i8 *%p) {
-  %v = load i8, i8* %p, align 2
+define i8 @ldi8_a2(ptr %p) {
+  %v = load i8, ptr %p, align 2
   ret i8 %v
 }
 
@@ -85,8 +85,8 @@ define i8 @ldi8_a2(i8 *%p) {
 ; CHECK-NEXT: .functype ldi16_a1 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load16_u $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i16 @ldi16_a1(i16 *%p) {
-  %v = load i16, i16* %p, align 1
+define i16 @ldi16_a1(ptr %p) {
+  %v = load i16, ptr %p, align 1
   ret i16 %v
 }
 
@@ -94,8 +94,8 @@ define i16 @ldi16_a1(i16 *%p) {
 ; CHECK-NEXT: .functype ldi16_a2 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load16_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i16 @ldi16_a2(i16 *%p) {
-  %v = load i16, i16* %p, align 2
+define i16 @ldi16_a2(ptr %p) {
+  %v = load i16, ptr %p, align 2
   ret i16 %v
 }
 
@@ -103,8 +103,8 @@ define i16 @ldi16_a2(i16 *%p) {
 ; CHECK-NEXT: .functype ldi16_a4 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.load16_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i16 @ldi16_a4(i16 *%p) {
-  %v = load i16, i16* %p, align 4
+define i16 @ldi16_a4(ptr %p) {
+  %v = load i16, ptr %p, align 4
   ret i16 %v
 }
 
@@ -116,8 +116,8 @@ define i16 @ldi16_a4(i16 *%p) {
 ; CHECK-NEXT: .functype sti32_a1 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.store 0($0):p2align=0, $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32_a1(i32 *%p, i32 %v) {
-  store i32 %v, i32* %p, align 1
+define void @sti32_a1(ptr %p, i32 %v) {
+  store i32 %v, ptr %p, align 1
   ret void
 }
 
@@ -125,8 +125,8 @@ define void @sti32_a1(i32 *%p, i32 %v) {
 ; CHECK-NEXT: .functype sti32_a2 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.store 0($0):p2align=1, $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32_a2(i32 *%p, i32 %v) {
-  store i32 %v, i32* %p, align 2
+define void @sti32_a2(ptr %p, i32 %v) {
+  store i32 %v, ptr %p, align 2
   ret void
 }
 
@@ -136,8 +136,8 @@ define void @sti32_a2(i32 *%p, i32 %v) {
 ; CHECK-NEXT: .functype sti32_a4 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32_a4(i32 *%p, i32 %v) {
-  store i32 %v, i32* %p, align 4
+define void @sti32_a4(ptr %p, i32 %v) {
+  store i32 %v, ptr %p, align 4
   ret void
 }
 
@@ -147,8 +147,8 @@ define void @sti32_a4(i32 *%p, i32 %v) {
 ; CHECK-NEXT: .functype sti32 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32(i32 *%p, i32 %v) {
-  store i32 %v, i32* %p
+define void @sti32(ptr %p, i32 %v) {
+  store i32 %v, ptr %p
   ret void
 }
 
@@ -156,8 +156,8 @@ define void @sti32(i32 *%p, i32 %v) {
 ; CHECK-NEXT: .functype sti32_a8 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32_a8(i32 *%p, i32 %v) {
-  store i32 %v, i32* %p, align 8
+define void @sti32_a8(ptr %p, i32 %v) {
+  store i32 %v, ptr %p, align 8
   ret void
 }
 
@@ -169,8 +169,8 @@ define void @sti32_a8(i32 *%p, i32 %v) {
 ; CHECK-NEXT: .functype sti8_a1 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.store8 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti8_a1(i8 *%p, i8 %v) {
-  store i8 %v, i8* %p, align 1
+define void @sti8_a1(ptr %p, i8 %v) {
+  store i8 %v, ptr %p, align 1
   ret void
 }
 
@@ -178,8 +178,8 @@ define void @sti8_a1(i8 *%p, i8 %v) {
 ; CHECK-NEXT: .functype sti8_a2 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.store8 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti8_a2(i8 *%p, i8 %v) {
-  store i8 %v, i8* %p, align 2
+define void @sti8_a2(ptr %p, i8 %v) {
+  store i8 %v, ptr %p, align 2
   ret void
 }
 
@@ -187,8 +187,8 @@ define void @sti8_a2(i8 *%p, i8 %v) {
 ; CHECK-NEXT: .functype sti16_a1 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.store16 0($0):p2align=0, $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti16_a1(i16 *%p, i16 %v) {
-  store i16 %v, i16* %p, align 1
+define void @sti16_a1(ptr %p, i16 %v) {
+  store i16 %v, ptr %p, align 1
   ret void
 }
 
@@ -196,8 +196,8 @@ define void @sti16_a1(i16 *%p, i16 %v) {
 ; CHECK-NEXT: .functype sti16_a2 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.store16 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti16_a2(i16 *%p, i16 %v) {
-  store i16 %v, i16* %p, align 2
+define void @sti16_a2(ptr %p, i16 %v) {
+  store i16 %v, ptr %p, align 2
   ret void
 }
 
@@ -205,8 +205,8 @@ define void @sti16_a2(i16 *%p, i16 %v) {
 ; CHECK-NEXT: .functype sti16_a4 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.store16 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti16_a4(i16 *%p, i16 %v) {
-  store i16 %v, i16* %p, align 4
+define void @sti16_a4(ptr %p, i16 %v) {
+  store i16 %v, ptr %p, align 4
   ret void
 }
 
@@ -221,8 +221,8 @@ define void @sti16_a4(i16 *%p, i16 %v) {
 ; CHECK-NEXT: .functype ldi32_atomic_a4 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i32 @ldi32_atomic_a4(i32 *%p) {
-  %v = load atomic i32, i32* %p seq_cst, align 4
+define i32 @ldi32_atomic_a4(ptr %p) {
+  %v = load atomic i32, ptr %p seq_cst, align 4
   ret i32 %v
 }
 
@@ -232,8 +232,8 @@ define i32 @ldi32_atomic_a4(i32 *%p) {
 ; CHECK-NEXT: .functype ldi32_atomic_a8 (i32) -> (i32){{$}}
 ; CHECK-NEXT: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i32 @ldi32_atomic_a8(i32 *%p) {
-  %v = load atomic i32, i32* %p seq_cst, align 8
+define i32 @ldi32_atomic_a8(ptr %p) {
+  %v = load atomic i32, ptr %p seq_cst, align 8
   ret i32 %v
 }
 
@@ -245,8 +245,8 @@ define i32 @ldi32_atomic_a8(i32 *%p) {
 ; CHECK-NEXT: .functype sti32_atomic_a4 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32_atomic_a4(i32 *%p, i32 %v) {
- store atomic i32 %v, i32* %p seq_cst, align 4
+define void @sti32_atomic_a4(ptr %p, i32 %v) {
+ store atomic i32 %v, ptr %p seq_cst, align 4
  ret void
 }
 
@@ -256,7 +256,7 @@ define void @sti32_atomic_a4(i32 *%p, i32 %v) {
 ; CHECK-NEXT: .functype sti32_atomic_a8 (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32_atomic_a8(i32 *%p, i32 %v) {
- store atomic i32 %v, i32* %p seq_cst, align 8
+define void @sti32_atomic_a8(ptr %p, i32 %v) {
+ store atomic i32 %v, ptr %p seq_cst, align 8
  ret void
 }

diff --git a/llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll b/llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
index 3128180d1d62..5f484d492069 100644
--- a/llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
+++ b/llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
@@ -12,8 +12,8 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-NEXT: .functype ldi64_a1 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi64_a1(i64 *%p) {
-  %v = load i64, i64* %p, align 1
+define i64 @ldi64_a1(ptr %p) {
+  %v = load i64, ptr %p, align 1
   ret i64 %v
 }
 
@@ -21,8 +21,8 @@ define i64 @ldi64_a1(i64 *%p) {
 ; CHECK-NEXT: .functype ldi64_a2 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0):p2align=1{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi64_a2(i64 *%p) {
-  %v = load i64, i64* %p, align 2
+define i64 @ldi64_a2(ptr %p) {
+  %v = load i64, ptr %p, align 2
   ret i64 %v
 }
 
@@ -30,8 +30,8 @@ define i64 @ldi64_a2(i64 *%p) {
 ; CHECK-NEXT: .functype ldi64_a4 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0):p2align=2{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi64_a4(i64 *%p) {
-  %v = load i64, i64* %p, align 4
+define i64 @ldi64_a4(ptr %p) {
+  %v = load i64, ptr %p, align 4
   ret i64 %v
 }
 
@@ -41,8 +41,8 @@ define i64 @ldi64_a4(i64 *%p) {
 ; CHECK-NEXT: .functype ldi64_a8 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi64_a8(i64 *%p) {
-  %v = load i64, i64* %p, align 8
+define i64 @ldi64_a8(ptr %p) {
+  %v = load i64, ptr %p, align 8
   ret i64 %v
 }
 
@@ -52,8 +52,8 @@ define i64 @ldi64_a8(i64 *%p) {
 ; CHECK-NEXT: .functype ldi64 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi64(i64 *%p) {
-  %v = load i64, i64* %p
+define i64 @ldi64(ptr %p) {
+  %v = load i64, ptr %p
   ret i64 %v
 }
 
@@ -63,8 +63,8 @@ define i64 @ldi64(i64 *%p) {
 ; CHECK-NEXT: .functype ldi64_a16 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi64_a16(i64 *%p) {
-  %v = load i64, i64* %p, align 16
+define i64 @ldi64_a16(ptr %p) {
+  %v = load i64, ptr %p, align 16
   ret i64 %v
 }
 
@@ -76,8 +76,8 @@ define i64 @ldi64_a16(i64 *%p) {
 ; CHECK-NEXT: .functype ldi8_a1 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi8_a1(i8 *%p) {
-  %v = load i8, i8* %p, align 1
+define i64 @ldi8_a1(ptr %p) {
+  %v = load i8, ptr %p, align 1
   %w = zext i8 %v to i64
   ret i64 %w
 }
@@ -86,8 +86,8 @@ define i64 @ldi8_a1(i8 *%p) {
 ; CHECK-NEXT: .functype ldi8_a2 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi8_a2(i8 *%p) {
-  %v = load i8, i8* %p, align 2
+define i64 @ldi8_a2(ptr %p) {
+  %v = load i8, ptr %p, align 2
   %w = zext i8 %v to i64
   ret i64 %w
 }
@@ -96,8 +96,8 @@ define i64 @ldi8_a2(i8 *%p) {
 ; CHECK-NEXT: .functype ldi16_a1 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load16_u $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi16_a1(i16 *%p) {
-  %v = load i16, i16* %p, align 1
+define i64 @ldi16_a1(ptr %p) {
+  %v = load i16, ptr %p, align 1
   %w = zext i16 %v to i64
   ret i64 %w
 }
@@ -106,8 +106,8 @@ define i64 @ldi16_a1(i16 *%p) {
 ; CHECK-NEXT: .functype ldi16_a2 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load16_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi16_a2(i16 *%p) {
-  %v = load i16, i16* %p, align 2
+define i64 @ldi16_a2(ptr %p) {
+  %v = load i16, ptr %p, align 2
   %w = zext i16 %v to i64
   ret i64 %w
 }
@@ -116,8 +116,8 @@ define i64 @ldi16_a2(i16 *%p) {
 ; CHECK-NEXT: .functype ldi16_a4 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load16_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi16_a4(i16 *%p) {
-  %v = load i16, i16* %p, align 4
+define i64 @ldi16_a4(ptr %p) {
+  %v = load i16, ptr %p, align 4
   %w = zext i16 %v to i64
   ret i64 %w
 }
@@ -126,8 +126,8 @@ define i64 @ldi16_a4(i16 *%p) {
 ; CHECK-NEXT: .functype ldi32_a1 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi32_a1(i32 *%p) {
-  %v = load i32, i32* %p, align 1
+define i64 @ldi32_a1(ptr %p) {
+  %v = load i32, ptr %p, align 1
   %w = zext i32 %v to i64
   ret i64 %w
 }
@@ -136,8 +136,8 @@ define i64 @ldi32_a1(i32 *%p) {
 ; CHECK-NEXT: .functype ldi32_a2 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0):p2align=1{{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi32_a2(i32 *%p) {
-  %v = load i32, i32* %p, align 2
+define i64 @ldi32_a2(ptr %p) {
+  %v = load i32, ptr %p, align 2
   %w = zext i32 %v to i64
   ret i64 %w
 }
@@ -146,8 +146,8 @@ define i64 @ldi32_a2(i32 *%p) {
 ; CHECK-NEXT: .functype ldi32_a4 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi32_a4(i32 *%p) {
-  %v = load i32, i32* %p, align 4
+define i64 @ldi32_a4(ptr %p) {
+  %v = load i32, ptr %p, align 4
   %w = zext i32 %v to i64
   ret i64 %w
 }
@@ -156,8 +156,8 @@ define i64 @ldi32_a4(i32 *%p) {
 ; CHECK-NEXT: .functype ldi32_a8 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi32_a8(i32 *%p) {
-  %v = load i32, i32* %p, align 8
+define i64 @ldi32_a8(ptr %p) {
+  %v = load i32, ptr %p, align 8
   %w = zext i32 %v to i64
   ret i64 %w
 }
@@ -170,8 +170,8 @@ define i64 @ldi32_a8(i32 *%p) {
 ; CHECK-NEXT: .functype sti64_a1 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store 0($0):p2align=0, $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti64_a1(i64 *%p, i64 %v) {
-  store i64 %v, i64* %p, align 1
+define void @sti64_a1(ptr %p, i64 %v) {
+  store i64 %v, ptr %p, align 1
   ret void
 }
 
@@ -179,8 +179,8 @@ define void @sti64_a1(i64 *%p, i64 %v) {
 ; CHECK-NEXT: .functype sti64_a2 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store 0($0):p2align=1, $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti64_a2(i64 *%p, i64 %v) {
-  store i64 %v, i64* %p, align 2
+define void @sti64_a2(ptr %p, i64 %v) {
+  store i64 %v, ptr %p, align 2
   ret void
 }
 
@@ -188,8 +188,8 @@ define void @sti64_a2(i64 *%p, i64 %v) {
 ; CHECK-NEXT: .functype sti64_a4 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store 0($0):p2align=2, $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti64_a4(i64 *%p, i64 %v) {
-  store i64 %v, i64* %p, align 4
+define void @sti64_a4(ptr %p, i64 %v) {
+  store i64 %v, ptr %p, align 4
   ret void
 }
 
@@ -199,8 +199,8 @@ define void @sti64_a4(i64 *%p, i64 %v) {
 ; CHECK-NEXT: .functype sti64_a8 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti64_a8(i64 *%p, i64 %v) {
-  store i64 %v, i64* %p, align 8
+define void @sti64_a8(ptr %p, i64 %v) {
+  store i64 %v, ptr %p, align 8
   ret void
 }
 
@@ -210,8 +210,8 @@ define void @sti64_a8(i64 *%p, i64 %v) {
 ; CHECK-NEXT: .functype sti64 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti64(i64 *%p, i64 %v) {
-  store i64 %v, i64* %p
+define void @sti64(ptr %p, i64 %v) {
+  store i64 %v, ptr %p
   ret void
 }
 
@@ -219,8 +219,8 @@ define void @sti64(i64 *%p, i64 %v) {
 ; CHECK-NEXT: .functype sti64_a16 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti64_a16(i64 *%p, i64 %v) {
-  store i64 %v, i64* %p, align 16
+define void @sti64_a16(ptr %p, i64 %v) {
+  store i64 %v, ptr %p, align 16
   ret void
 }
 
@@ -232,9 +232,9 @@ define void @sti64_a16(i64 *%p, i64 %v) {
 ; CHECK-NEXT: .functype sti8_a1 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store8 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti8_a1(i8 *%p, i64 %w) {
+define void @sti8_a1(ptr %p, i64 %w) {
   %v = trunc i64 %w to i8
-  store i8 %v, i8* %p, align 1
+  store i8 %v, ptr %p, align 1
   ret void
 }
 
@@ -242,9 +242,9 @@ define void @sti8_a1(i8 *%p, i64 %w) {
 ; CHECK-NEXT: .functype sti8_a2 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store8 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti8_a2(i8 *%p, i64 %w) {
+define void @sti8_a2(ptr %p, i64 %w) {
   %v = trunc i64 %w to i8
-  store i8 %v, i8* %p, align 2
+  store i8 %v, ptr %p, align 2
   ret void
 }
 
@@ -252,9 +252,9 @@ define void @sti8_a2(i8 *%p, i64 %w) {
 ; CHECK-NEXT: .functype sti16_a1 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store16 0($0):p2align=0, $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti16_a1(i16 *%p, i64 %w) {
+define void @sti16_a1(ptr %p, i64 %w) {
   %v = trunc i64 %w to i16
-  store i16 %v, i16* %p, align 1
+  store i16 %v, ptr %p, align 1
   ret void
 }
 
@@ -262,9 +262,9 @@ define void @sti16_a1(i16 *%p, i64 %w) {
 ; CHECK-NEXT: .functype sti16_a2 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store16 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti16_a2(i16 *%p, i64 %w) {
+define void @sti16_a2(ptr %p, i64 %w) {
   %v = trunc i64 %w to i16
-  store i16 %v, i16* %p, align 2
+  store i16 %v, ptr %p, align 2
   ret void
 }
 
@@ -272,9 +272,9 @@ define void @sti16_a2(i16 *%p, i64 %w) {
 ; CHECK-NEXT: .functype sti16_a4 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store16 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti16_a4(i16 *%p, i64 %w) {
+define void @sti16_a4(ptr %p, i64 %w) {
   %v = trunc i64 %w to i16
-  store i16 %v, i16* %p, align 4
+  store i16 %v, ptr %p, align 4
   ret void
 }
 
@@ -282,9 +282,9 @@ define void @sti16_a4(i16 *%p, i64 %w) {
 ; CHECK-NEXT: .functype sti32_a1 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store32 0($0):p2align=0, $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32_a1(i32 *%p, i64 %w) {
+define void @sti32_a1(ptr %p, i64 %w) {
   %v = trunc i64 %w to i32
-  store i32 %v, i32* %p, align 1
+  store i32 %v, ptr %p, align 1
   ret void
 }
 
@@ -292,9 +292,9 @@ define void @sti32_a1(i32 *%p, i64 %w) {
 ; CHECK-NEXT: .functype sti32_a2 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store32 0($0):p2align=1, $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32_a2(i32 *%p, i64 %w) {
+define void @sti32_a2(ptr %p, i64 %w) {
   %v = trunc i64 %w to i32
-  store i32 %v, i32* %p, align 2
+  store i32 %v, ptr %p, align 2
   ret void
 }
 
@@ -302,9 +302,9 @@ define void @sti32_a2(i32 *%p, i64 %w) {
 ; CHECK-NEXT: .functype sti32_a4 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store32 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32_a4(i32 *%p, i64 %w) {
+define void @sti32_a4(ptr %p, i64 %w) {
   %v = trunc i64 %w to i32
-  store i32 %v, i32* %p, align 4
+  store i32 %v, ptr %p, align 4
   ret void
 }
 
@@ -312,9 +312,9 @@ define void @sti32_a4(i32 *%p, i64 %w) {
 ; CHECK-NEXT: .functype sti32_a8 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.store32 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32_a8(i32 *%p, i64 %w) {
+define void @sti32_a8(ptr %p, i64 %w) {
   %v = trunc i64 %w to i32
-  store i32 %v, i32* %p, align 8
+  store i32 %v, ptr %p, align 8
   ret void
 }
 
@@ -329,8 +329,8 @@ define void @sti32_a8(i32 *%p, i64 %w) {
 ; CHECK-NEXT: .functype ldi64_atomic_a8 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi64_atomic_a8(i64 *%p) {
-  %v = load atomic i64, i64* %p seq_cst, align 8
+define i64 @ldi64_atomic_a8(ptr %p) {
+  %v = load atomic i64, ptr %p seq_cst, align 8
   ret i64 %v
 }
 
@@ -340,8 +340,8 @@ define i64 @ldi64_atomic_a8(i64 *%p) {
 ; CHECK-NEXT: .functype ldi64_atomic_a16 (i32) -> (i64){{$}}
 ; CHECK-NEXT: i64.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi64_atomic_a16(i64 *%p) {
-  %v = load atomic i64, i64* %p seq_cst, align 16
+define i64 @ldi64_atomic_a16(ptr %p) {
+  %v = load atomic i64, ptr %p seq_cst, align 16
   ret i64 %v
 }
 
@@ -353,8 +353,8 @@ define i64 @ldi64_atomic_a16(i64 *%p) {
 ; CHECK-NEXT: .functype sti64_atomic_a4 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti64_atomic_a4(i64 *%p, i64 %v) {
- store atomic i64 %v, i64* %p seq_cst, align 8
+define void @sti64_atomic_a4(ptr %p, i64 %v) {
+ store atomic i64 %v, ptr %p seq_cst, align 8
  ret void
 }
 
@@ -364,7 +364,7 @@ define void @sti64_atomic_a4(i64 *%p, i64 %v) {
 ; CHECK-NEXT: .functype sti64_atomic_a8 (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti64_atomic_a8(i64 *%p, i64 %v) {
- store atomic i64 %v, i64* %p seq_cst, align 16
+define void @sti64_atomic_a8(ptr %p, i64 %v) {
+ store atomic i64 %v, ptr %p seq_cst, align 16
  ret void
 }

diff --git a/llvm/test/CodeGen/WebAssembly/indirect-import.ll b/llvm/test/CodeGen/WebAssembly/indirect-import.ll
index 4d00f420d409..deb9cf3dcd01 100644
--- a/llvm/test/CodeGen/WebAssembly/indirect-import.ll
+++ b/llvm/test/CodeGen/WebAssembly/indirect-import.ll
@@ -19,41 +19,41 @@ target triple = "wasm32"
 ; CHECK-LABEL: bar:
 define hidden i32 @bar() #0 {
 entry:
-  %fd = alloca float (double)*, align 4
-  %vj = alloca void (i64)*, align 4
-  %v = alloca void ()*, align 4
-  %ijidf = alloca i32 (i64, i32, double, float)*, align 4
-  %vs = alloca void (%struct.big*)*, align 4
-  %s = alloca void (%struct.big*)*, align 4
-  %i128ret = alloca i128 (i64)*, align 8
+  %fd = alloca ptr, align 4
+  %vj = alloca ptr, align 4
+  %v = alloca ptr, align 4
+  %ijidf = alloca ptr, align 4
+  %vs = alloca ptr, align 4
+  %s = alloca ptr, align 4
+  %i128ret = alloca ptr, align 8
 
 ; CHECK-DAG: i32.const       {{.+}}=, extern_fd
 ; CHECK-DAG: i32.const       {{.+}}=, extern_vj
-  store float (double)* @extern_fd, float (double)** %fd, align 4
-  store void (i64)* @extern_vj, void (i64)** %vj, align 4
-  %0 = load void (i64)*, void (i64)** %vj, align 4
+  store ptr @extern_fd, ptr %fd, align 4
+  store ptr @extern_vj, ptr %vj, align 4
+  %0 = load ptr, ptr %vj, align 4
   call void %0(i64 1)
 
 ; CHECK: i32.const       {{.+}}=, extern_v
-  store void ()* @extern_v, void ()** %v, align 4
-  %1 = load void ()*, void ()** %v, align 4
+  store ptr @extern_v, ptr %v, align 4
+  %1 = load ptr, ptr %v, align 4
   call void %1()
 
 ; CHECK: i32.const       {{.+}}=, extern_ijidf
-  store i32 (i64, i32, double, float)* @extern_ijidf, i32 (i64, i32, double, float)** %ijidf, align 4
-  %2 = load i32 (i64, i32, double, float)*, i32 (i64, i32, double, float)** %ijidf, align 4
+  store ptr @extern_ijidf, ptr %ijidf, align 4
+  %2 = load ptr, ptr %ijidf, align 4
   %call = call i32 %2(i64 1, i32 2, double 3.000000e+00, float 4.000000e+00)
 
 ; CHECK-DAG: i32.const       {{.+}}=, extern_struct
 ; CHECK-DAG: i32.const       {{.+}}=, extern_sret
-  store void (%struct.big*)* @extern_struct, void (%struct.big*)** %vs, align 4
-  store void (%struct.big*)* @extern_sret, void (%struct.big*)** %s, align 4
-  %3 = load float (double)*, float (double)** %fd, align 4
-  %4 = ptrtoint float (double)* %3 to i32
+  store ptr @extern_struct, ptr %vs, align 4
+  store ptr @extern_sret, ptr %s, align 4
+  %3 = load ptr, ptr %fd, align 4
+  %4 = ptrtoint ptr %3 to i32
 
 ; CHECK: i32.const       {{.+}}=, extern_i128ret
-  store i128 (i64)* @extern_i128ret, i128 (i64)** %i128ret, align 8
-  %5 = load i128 (i64)*, i128 (i64)** %i128ret, align 8
+  store ptr @extern_i128ret, ptr %i128ret, align 8
+  %5 = load ptr, ptr %i128ret, align 8
   %6 = call i128 %5(i64 1)
 
   ret i32 %4
@@ -67,9 +67,9 @@ declare void @extern_v() #1
 
 declare i32 @extern_ijidf(i64, i32, double, float) #1
 
-declare void @extern_struct(%struct.big* byval(%struct.big) align 8) #1
+declare void @extern_struct(ptr byval(%struct.big) align 8) #1
 
-declare void @extern_sret(%struct.big* sret(%struct.big)) #1
+declare void @extern_sret(ptr sret(%struct.big)) #1
 
 declare i128 @extern_i128ret(i64) #1
 

diff --git a/llvm/test/CodeGen/WebAssembly/indirectbr.ll b/llvm/test/CodeGen/WebAssembly/indirectbr.ll
index 2cf41e4c5880..569d289d3d27 100644
--- a/llvm/test/CodeGen/WebAssembly/indirectbr.ll
+++ b/llvm/test/CodeGen/WebAssembly/indirectbr.ll
@@ -6,10 +6,10 @@
 
 target triple = "wasm32"
 
-@test1.targets = constant [4 x i8*] [i8* blockaddress(@test1, %bb0),
-                                     i8* blockaddress(@test1, %bb1),
-                                     i8* blockaddress(@test1, %bb2),
-                                     i8* blockaddress(@test1, %bb3)]
+@test1.targets = constant [4 x ptr] [ptr blockaddress(@test1, %bb0),
+                                     ptr blockaddress(@test1, %bb1),
+                                     ptr blockaddress(@test1, %bb2),
+                                     ptr blockaddress(@test1, %bb3)]
 
 ; Just check the barest skeleton of the structure
 ; CHECK-LABEL: test1:
@@ -34,36 +34,36 @@ target triple = "wasm32"
 ; CHECK-NEXT: .int32
 ; CHECK-NEXT: .int32
 
-define void @test1(i32* readonly %p, i32* %sink) #0 {
+define void @test1(ptr readonly %p, ptr %sink) #0 {
 
 entry:
-  %i0 = load i32, i32* %p
-  %target.i0 = getelementptr [4 x i8*], [4 x i8*]* @test1.targets, i32 0, i32 %i0
-  %target0 = load i8*, i8** %target.i0
+  %i0 = load i32, ptr %p
+  %target.i0 = getelementptr [4 x ptr], ptr @test1.targets, i32 0, i32 %i0
+  %target0 = load ptr, ptr %target.i0
   ; Only a subset of blocks are viable successors here.
-  indirectbr i8* %target0, [label %bb0, label %bb1]
+  indirectbr ptr %target0, [label %bb0, label %bb1]
 
 
 bb0:
-  store volatile i32 0, i32* %sink
+  store volatile i32 0, ptr %sink
   br label %latch
 
 bb1:
-  store volatile i32 1, i32* %sink
+  store volatile i32 1, ptr %sink
   br label %latch
 
 bb2:
-  store volatile i32 2, i32* %sink
+  store volatile i32 2, ptr %sink
   br label %latch
 
 bb3:
-  store volatile i32 3, i32* %sink
+  store volatile i32 3, ptr %sink
   br label %latch
 
 latch:
-  %i.next = load i32, i32* %p
-  %target.i.next = getelementptr [4 x i8*], [4 x i8*]* @test1.targets, i32 0, i32 %i.next
-  %target.next = load i8*, i8** %target.i.next
+  %i.next = load i32, ptr %p
+  %target.i.next = getelementptr [4 x ptr], ptr @test1.targets, i32 0, i32 %i.next
+  %target.next = load ptr, ptr %target.i.next
   ; A different subset of blocks are viable successors here.
-  indirectbr i8* %target.next, [label %bb1, label %bb2]
+  indirectbr ptr %target.next, [label %bb1, label %bb2]
 }

diff --git a/llvm/test/CodeGen/WebAssembly/inline-asm-m.ll b/llvm/test/CodeGen/WebAssembly/inline-asm-m.ll
index 5e30e23e3389..64758a41ee49 100644
--- a/llvm/test/CodeGen/WebAssembly/inline-asm-m.ll
+++ b/llvm/test/CodeGen/WebAssembly/inline-asm-m.ll
@@ -5,8 +5,8 @@
 
 target triple = "wasm32-unknown-unknown"
 
-define void @bar(i32* %r, i32* %s) {
+define void @bar(ptr %r, ptr %s) {
 entry:
-  tail call void asm sideeffect "# $0 = bbb($1)", "=*m,*m"(i32* %s, i32* %r) #0, !srcloc !1
+  tail call void asm sideeffect "# $0 = bbb($1)", "=*m,*m"(ptr %s, ptr %r) #0, !srcloc !1
   ret void
 }

diff --git a/llvm/test/CodeGen/WebAssembly/inline-asm-roundtrip.ll b/llvm/test/CodeGen/WebAssembly/inline-asm-roundtrip.ll
index a8f01629ec1a..8573744e619f 100644
--- a/llvm/test/CodeGen/WebAssembly/inline-asm-roundtrip.ll
+++ b/llvm/test/CodeGen/WebAssembly/inline-asm-roundtrip.ll
@@ -32,7 +32,7 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-NEXT:	i32.const	3
 ; CHECK-NEXT:	i32.ne
 
-define i32 @main(i32 %argc, i8** nocapture readnone %argv) #0 {
+define i32 @main(i32 %argc, ptr nocapture readnone %argv) #0 {
 entry:
   %0 = tail call i32 asm "i32.const\092\0A\09local.get\09$1\0A\09i32.add\0A\09local.set\09$0", "=r,r"(i32 1) #1
   %cmp = icmp ne i32 %0, 3

diff --git a/llvm/test/CodeGen/WebAssembly/inline-asm.ll b/llvm/test/CodeGen/WebAssembly/inline-asm.ll
index 038a03a71ddc..4462cfb7aa0c 100644
--- a/llvm/test/CodeGen/WebAssembly/inline-asm.ll
+++ b/llvm/test/CodeGen/WebAssembly/inline-asm.ll
@@ -76,8 +76,8 @@ entry:
 ; CHECK: local.get $push[[S0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: local.get $push[[S1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.store16 0($pop[[S0]]), $pop[[S1]]{{$}}
-define void @X_i16(i16 * %t) {
-  call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(i16* elementtype(i16) %t)
+define void @X_i16(ptr %t) {
+  call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(ptr elementtype(i16) %t)
   ret void
 }
 
@@ -86,15 +86,15 @@ define void @X_i16(i16 * %t) {
 ; CHECK: local.get $push[[S0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: local.get $push[[S1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.store 0($pop[[S0]]), $pop[[S1]]{{$}}
-define void @X_ptr(i16 ** %t) {
-  call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(i16** elementtype(i16*) %t)
+define void @X_ptr(ptr %t) {
+  call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"(ptr elementtype(ptr) %t)
   ret void
 }
 
 ; CHECK-LABEL: funcname:
 ; CHECK: foo funcname{{$}}
 define void @funcname() {
-  tail call void asm sideeffect "foo $0", "i"(void ()* nonnull @funcname) #0, !srcloc !0
+  tail call void asm sideeffect "foo $0", "i"(ptr nonnull @funcname) #0, !srcloc !0
   ret void
 }
 
@@ -102,7 +102,7 @@ define void @funcname() {
 ; CHECK: foo gv+37{{$}}
 @gv = global [0 x i8] zeroinitializer
 define void @varname() {
-  tail call void asm sideeffect "foo $0", "i"(i8* getelementptr inbounds ([0 x i8], [0 x i8]* @gv, i64 0, i64 37)) #0, !srcloc !0
+  tail call void asm sideeffect "foo $0", "i"(ptr getelementptr inbounds ([0 x i8], ptr @gv, i64 0, i64 37)) #0, !srcloc !0
   ret void
 }
 

diff --git a/llvm/test/CodeGen/WebAssembly/inlineasm-output-template.ll b/llvm/test/CodeGen/WebAssembly/inlineasm-output-template.ll
index 92b02971a098..61931d7b5555 100644
--- a/llvm/test/CodeGen/WebAssembly/inlineasm-output-template.ll
+++ b/llvm/test/CodeGen/WebAssembly/inlineasm-output-template.ll
@@ -16,7 +16,7 @@ define dso_local i32 @test_inlineasm_c_output_template0() {
 ; CHECK: #TEST baz
 @baz = internal global i32 0, align 4
 define dso_local i32 @test_inlineasm_c_output_template2() {
-  tail call void asm sideeffect "#TEST ${0:c}", "i"(i32* nonnull @baz)
+  tail call void asm sideeffect "#TEST ${0:c}", "i"(ptr nonnull @baz)
   ret i32 42
 }
 

diff --git a/llvm/test/CodeGen/WebAssembly/ir-locals-stackid.ll b/llvm/test/CodeGen/WebAssembly/ir-locals-stackid.ll
index f5c3630ab65e..ff2003bf97ee 100644
--- a/llvm/test/CodeGen/WebAssembly/ir-locals-stackid.ll
+++ b/llvm/test/CodeGen/WebAssembly/ir-locals-stackid.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=wasm32-unknown-unknown -asm-verbose=false < %s | FileCheck %s --check-prefix=CHECKCG
 ; RUN: llc -mtriple=wasm32-unknown-unknown -stop-after=finalize-isel < %s | FileCheck %s --check-prefix=CHECKISEL
 
-%f32_cell = type float addrspace(1)*
+%f32_cell = type ptr addrspace(1)
 
 ; CHECKISEL-LABEL: name: ir_local_f32
 ; CHECKISEL:       stack:

diff --git a/llvm/test/CodeGen/WebAssembly/ir-locals.ll b/llvm/test/CodeGen/WebAssembly/ir-locals.ll
index a70fcee55916..c5e66f530773 100644
--- a/llvm/test/CodeGen/WebAssembly/ir-locals.ll
+++ b/llvm/test/CodeGen/WebAssembly/ir-locals.ll
@@ -1,9 +1,9 @@
 ; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false | FileCheck %s
 
-%i32_cell = type i32 addrspace(1)*
-%i64_cell = type i64 addrspace(1)*
-%f32_cell = type float addrspace(1)*
-%f64_cell = type double addrspace(1)*
+%i32_cell = type ptr addrspace(1)
+%i64_cell = type ptr addrspace(1)
+%f32_cell = type ptr addrspace(1)
+%f64_cell = type ptr addrspace(1)
 
 ; We have a set of tests in which we set a local and then reload the
 ; local.  If the load immediately follows the set, the DAG combiner will

diff --git a/llvm/test/CodeGen/WebAssembly/irreducible-cfg-exceptions.ll b/llvm/test/CodeGen/WebAssembly/irreducible-cfg-exceptions.ll
index b948dbf01f38..8fe32d66a2c5 100644
--- a/llvm/test/CodeGen/WebAssembly/irreducible-cfg-exceptions.ll
+++ b/llvm/test/CodeGen/WebAssembly/irreducible-cfg-exceptions.ll
@@ -10,7 +10,7 @@ declare i32 @__gxx_personality_v0(...)
 ; CHECK-LABEL: crashy:
 ; CHECK-NOT: br_table
 
-define void @crashy() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @crashy() personality ptr @__gxx_personality_v0 {
 entry:
   invoke void undef()
           to label %invoke.cont unwind label %lpad
@@ -20,7 +20,7 @@ invoke.cont:                                      ; preds = %entry
           to label %invoke.cont4 unwind label %lpad3
 
 invoke.cont4:                                     ; preds = %invoke.cont
-  %call.i82 = invoke i8* undef()
+  %call.i82 = invoke ptr undef()
           to label %invoke.cont6 unwind label %lpad3
 
 invoke.cont6:                                     ; preds = %invoke.cont4
@@ -57,27 +57,27 @@ invoke.cont25:                                    ; preds = %invoke.cont23
           to label %invoke.cont29 unwind label %lpad16.loopexit
 
 lpad:                                             ; preds = %entry
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
   unreachable
 
 lpad3:                                            ; preds = %invoke.cont4, %invoke.cont
-  %1 = landingpad { i8*, i32 }
+  %1 = landingpad { ptr, i32 }
           cleanup
   unreachable
 
 lpad12:                                           ; preds = %invoke.cont6
-  %2 = landingpad { i8*, i32 }
+  %2 = landingpad { ptr, i32 }
           cleanup
-  resume { i8*, i32 } undef
+  resume { ptr, i32 } undef
 
 lpad16.loopexit:                                  ; preds = %if.then, %invoke.cont29, %invoke.cont25, %exit2, %land.lhs
-  %lpad.loopexit = landingpad { i8*, i32 }
+  %lpad.loopexit = landingpad { ptr, i32 }
           cleanup
   unreachable
 
 lpad22:                                           ; preds = %invoke.cont23, %exit3
-  %3 = landingpad { i8*, i32 }
+  %3 = landingpad { ptr, i32 }
           cleanup
   unreachable
 
@@ -89,7 +89,7 @@ invoke.cont33:                                    ; preds = %invoke.cont29
   br label %for.inc
 
 for.inc:                                          ; preds = %invoke.cont33
-  %cmp.i.i141 = icmp eq i8* undef, undef
+  %cmp.i.i141 = icmp eq ptr undef, undef
   br i1 %cmp.i.i141, label %if.then, label %if.end.i.i146
 
 if.then:                                          ; preds = %for.inc

diff --git a/llvm/test/CodeGen/WebAssembly/irreducible-cfg.ll b/llvm/test/CodeGen/WebAssembly/irreducible-cfg.ll
index 8277c2c9e144..05712fdf991a 100644
--- a/llvm/test/CodeGen/WebAssembly/irreducible-cfg.ll
+++ b/llvm/test/CodeGen/WebAssembly/irreducible-cfg.ll
@@ -10,14 +10,14 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK: f64.load
 ; CHECK: i32.const $[[REG:[^,]+]]=
 ; CHECK: br_table  $[[REG]],
-define void @test0(double* %arg, i32 %arg1, i32 %arg2, i32 %arg3) {
+define void @test0(ptr %arg, i32 %arg1, i32 %arg2, i32 %arg3) {
 bb:
   %tmp = icmp eq i32 %arg2, 0
   br i1 %tmp, label %bb6, label %bb3
 
 bb3:                                              ; preds = %bb
-  %tmp4 = getelementptr double, double* %arg, i32 %arg3
-  %tmp5 = load double, double* %tmp4, align 4
+  %tmp4 = getelementptr double, ptr %arg, i32 %arg3
+  %tmp5 = load double, ptr %tmp4, align 4
   br label %bb13
 
 bb6:                                              ; preds = %bb13, %bb
@@ -26,18 +26,18 @@ bb6:                                              ; preds = %bb13, %bb
   br i1 %tmp8, label %bb9, label %bb19
 
 bb9:                                              ; preds = %bb6
-  %tmp10 = getelementptr double, double* %arg, i32 %tmp7
-  %tmp11 = load double, double* %tmp10, align 4
+  %tmp10 = getelementptr double, ptr %arg, i32 %tmp7
+  %tmp11 = load double, ptr %tmp10, align 4
   %tmp12 = fmul double %tmp11, 2.300000e+00
-  store double %tmp12, double* %tmp10, align 4
+  store double %tmp12, ptr %tmp10, align 4
   br label %bb13
 
 bb13:                                             ; preds = %bb9, %bb3
   %tmp14 = phi double [ %tmp5, %bb3 ], [ %tmp12, %bb9 ]
   %tmp15 = phi i32 [ undef, %bb3 ], [ %tmp7, %bb9 ]
-  %tmp16 = getelementptr double, double* %arg, i32 %tmp15
+  %tmp16 = getelementptr double, ptr %arg, i32 %tmp15
   %tmp17 = fadd double %tmp14, 1.300000e+00
-  store double %tmp17, double* %tmp16, align 4
+  store double %tmp17, ptr %tmp16, align 4
   %tmp18 = add nsw i32 %tmp15, 1
   br label %bb6
 
@@ -51,14 +51,14 @@ bb19:                                             ; preds = %bb6
 ; CHECK: f64.load
 ; CHECK: i32.const $[[REG:[^,]+]]=
 ; CHECK: br_table  $[[REG]],
-define void @test1(double* %arg, i32 %arg1, i32 %arg2, i32 %arg3) {
+define void @test1(ptr %arg, i32 %arg1, i32 %arg2, i32 %arg3) {
 bb:
   %tmp = icmp eq i32 %arg2, 0
   br i1 %tmp, label %bb6, label %bb3
 
 bb3:                                              ; preds = %bb
-  %tmp4 = getelementptr double, double* %arg, i32 %arg3
-  %tmp5 = load double, double* %tmp4, align 4
+  %tmp4 = getelementptr double, ptr %arg, i32 %arg3
+  %tmp5 = load double, ptr %tmp4, align 4
   br label %bb13
 
 bb6:                                              ; preds = %bb13, %bb
@@ -67,10 +67,10 @@ bb6:                                              ; preds = %bb13, %bb
   br i1 %tmp8, label %bb9, label %bb19
 
 bb9:                                              ; preds = %bb6
-  %tmp10 = getelementptr double, double* %arg, i32 %tmp7
-  %tmp11 = load double, double* %tmp10, align 4
+  %tmp10 = getelementptr double, ptr %arg, i32 %tmp7
+  %tmp11 = load double, ptr %tmp10, align 4
   %tmp12 = fmul double %tmp11, 2.300000e+00
-  store double %tmp12, double* %tmp10, align 4
+  store double %tmp12, ptr %tmp10, align 4
   br label %bb10
 
 bb10:                                             ; preds = %bb10, %bb9
@@ -82,9 +82,9 @@ bb10:                                             ; preds = %bb10, %bb9
 bb13:                                             ; preds = %bb10, %bb3
   %tmp14 = phi double [ %tmp5, %bb3 ], [ %tmp12, %bb10 ]
   %tmp15 = phi i32 [ undef, %bb3 ], [ %tmp7, %bb10 ]
-  %tmp16 = getelementptr double, double* %arg, i32 %tmp15
+  %tmp16 = getelementptr double, ptr %arg, i32 %tmp15
   %tmp17 = fadd double %tmp14, 1.300000e+00
-  store double %tmp17, double* %tmp16, align 4
+  store double %tmp17, ptr %tmp16, align 4
   %tmp18 = add nsw i32 %tmp15, 1
   br label %bb6
 
@@ -131,8 +131,8 @@ A2:                                               ; preds = %A2, %A1, %A0
 define void @test3(i32 %ws) {
 entry:
   %ws.addr = alloca i32, align 4
-  store volatile i32 %ws, i32* %ws.addr, align 4
-  %0 = load volatile i32, i32* %ws.addr, align 4
+  store volatile i32 %ws, ptr %ws.addr, align 4
+  %0 = load volatile i32, ptr %ws.addr, align 4
   %tobool = icmp ne i32 %0, 0
   br i1 %tobool, label %if.then, label %if.end
 
@@ -140,7 +140,7 @@ if.then:                                          ; preds = %entry
   br label %wynn
 
 if.end:                                           ; preds = %entry
-  %1 = load volatile i32, i32* %ws.addr, align 4
+  %1 = load volatile i32, ptr %ws.addr, align 4
   %tobool1 = icmp ne i32 %1, 0
   br i1 %tobool1, label %if.end9, label %if.then2
 
@@ -148,7 +148,7 @@ if.then2:                                         ; preds = %if.end
   br label %for.cond
 
 for.cond:                                         ; preds = %wynn, %if.then7, %if.then2
-  %2 = load volatile i32, i32* %ws.addr, align 4
+  %2 = load volatile i32, ptr %ws.addr, align 4
   %tobool3 = icmp ne i32 %2, 0
   br i1 %tobool3, label %if.then4, label %if.end5
 
@@ -156,7 +156,7 @@ if.then4:                                         ; preds = %for.cond
   br label %if.end5
 
 if.end5:                                          ; preds = %if.then4, %for.cond
-  %3 = load volatile i32, i32* %ws.addr, align 4
+  %3 = load volatile i32, ptr %ws.addr, align 4
   %tobool6 = icmp ne i32 %3, 0
   br i1 %tobool6, label %if.then7, label %if.end8
 
@@ -191,7 +191,7 @@ if.else.i52:                                      ; preds = %sw.bb5
   br label %for.cond57.i
 
 for.cond57.i:                                     ; preds = %for.inc205.i, %if.else.i52
-  store i32 0, i32* undef, align 4
+  store i32 0, ptr undef, align 4
   br label %for.cond65.i
 
 for.cond65.i:                                     ; preds = %for.inc201.i, %for.cond57.i
@@ -236,7 +236,7 @@ for.body:                                         ; preds = %psh
   br label %do.body
 
 do.body:                                          ; preds = %do.cond, %for.body
-  %cmp118 = icmp eq i32* undef, undef
+  %cmp118 = icmp eq ptr undef, undef
   br i1 %cmp118, label %Skip, label %do.cond
 
 do.cond:                                          ; preds = %do.body
@@ -259,7 +259,7 @@ Skip:                                             ; preds = %for.body39, %do.bod
 ; misinterpreted as irreducible control flow.
 ; CHECK: fannkuch_worker
 ; CHECK-NOT: br_table
-define i32 @fannkuch_worker(i8* %_arg) {
+define i32 @fannkuch_worker(ptr %_arg) {
 for.cond:
   br label %do.body
 

diff --git a/llvm/test/CodeGen/WebAssembly/legalize.ll b/llvm/test/CodeGen/WebAssembly/legalize.ll
index cd7d719bff11..8710a0598d0d 100644
--- a/llvm/test/CodeGen/WebAssembly/legalize.ll
+++ b/llvm/test/CodeGen/WebAssembly/legalize.ll
@@ -8,7 +8,7 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK: i32.const   $push0=, 7{{$}}
 ; CHECK: i32.and     $push1=, $1, $pop0{{$}}
 ; CHECK: i32.shl     $push2=, $0, $pop1{{$}}
-define i3 @shl_i3(i3 %a, i3 %b, i3* %p) {
+define i3 @shl_i3(i3 %a, i3 %b, ptr %p) {
   %t = shl i3 %a, %b
   ret i3 %t
 }
@@ -17,7 +17,7 @@ define i3 @shl_i3(i3 %a, i3 %b, i3* %p) {
 ; CHECK: i64.const   $push0=, 9007199254740991{{$}}
 ; CHECK: i64.and     $push1=, $1, $pop0{{$}}
 ; CHECK: i64.shl     $push2=, $0, $pop1{{$}}
-define i53 @shl_i53(i53 %a, i53 %b, i53* %p) {
+define i53 @shl_i53(i53 %a, i53 %b, ptr %p) {
   %t = shl i53 %a, %b
   ret i53 %t
 }
@@ -35,8 +35,8 @@ define i64 @sext_in_reg_i32_i64(i64 %a) {
 ; CHECK: f32.load $push0=, 0($0){{$}}
 ; CHECK: f64.promote_f32 $push1=, $pop0{{$}}
 ; CHECK: return $pop1{{$}}
-define double @fpext_f32_f64(float *%p) {
-  %v = load float, float* %p
+define double @fpext_f32_f64(ptr %p) {
+  %v = load float, ptr %p
   %e = fpext float %v to double
   ret double %e
 }
@@ -45,8 +45,8 @@ define double @fpext_f32_f64(float *%p) {
 ; CHECK: f64.load $push0=, 0($0){{$}}
 ; CHECK: f32.demote_f64 $push1=, $pop0{{$}}
 ; CHECK: return $pop1{{$}}
-define float @fpconv_f64_f32(double *%p) {
-  %v = load double, double* %p
+define float @fpconv_f64_f32(ptr %p) {
+  %v = load double, ptr %p
   %e = fptrunc double %v to float
   ret float %e
 }

diff --git a/llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll b/llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll
index a3535caeee59..043130213551 100644
--- a/llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll
+++ b/llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll
@@ -7,8 +7,8 @@
 ; CHECK: i32.atomic.load8_u $push0=, 0($0){{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @sext_i8_i32(i8 *%p) {
-  %v = load atomic i8, i8* %p seq_cst, align 1
+define i32 @sext_i8_i32(ptr %p) {
+  %v = load atomic i8, ptr %p seq_cst, align 1
   %e = sext i8 %v to i32
   ret i32 %e
 }
@@ -16,9 +16,9 @@ define i32 @sext_i8_i32(i8 *%p) {
 ; CHECK-LABEL: zext_i8_i32:
 ; CHECK: i32.atomic.load8_u $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @zext_i8_i32(i8 *%p) {
+define i32 @zext_i8_i32(ptr %p) {
 e1:
-  %v = load atomic i8, i8* %p seq_cst, align 1
+  %v = load atomic i8, ptr %p seq_cst, align 1
   %e = zext i8 %v to i32
   ret i32 %e
 }
@@ -27,8 +27,8 @@ e1:
 ; CHECK: i32.atomic.load16_u $push0=, 0($0){{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i32 @sext_i16_i32(i16 *%p) {
-  %v = load atomic i16, i16* %p seq_cst, align 2
+define i32 @sext_i16_i32(ptr %p) {
+  %v = load atomic i16, ptr %p seq_cst, align 2
   %e = sext i16 %v to i32
   ret i32 %e
 }
@@ -36,8 +36,8 @@ define i32 @sext_i16_i32(i16 *%p) {
 ; CHECK-LABEL: zext_i16_i32:
 ; CHECK: i32.atomic.load16_u $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @zext_i16_i32(i16 *%p) {
-  %v = load atomic i16, i16* %p seq_cst, align 2
+define i32 @zext_i16_i32(ptr %p) {
+  %v = load atomic i16, ptr %p seq_cst, align 2
   %e = zext i16 %v to i32
   ret i32 %e
 }
@@ -46,8 +46,8 @@ define i32 @zext_i16_i32(i16 *%p) {
 ; CHECK: i64.atomic.load8_u $push0=, 0($0){{$}}
 ; CHECK: i64.extend8_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @sext_i8_i64(i8 *%p) {
-  %v = load atomic i8, i8* %p seq_cst, align 1
+define i64 @sext_i8_i64(ptr %p) {
+  %v = load atomic i8, ptr %p seq_cst, align 1
   %e = sext i8 %v to i64
   ret i64 %e
 }
@@ -55,8 +55,8 @@ define i64 @sext_i8_i64(i8 *%p) {
 ; CHECK-LABEL: zext_i8_i64:
 ; CHECK: i64.atomic.load8_u $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @zext_i8_i64(i8 *%p) {
-  %v = load atomic i8, i8* %p seq_cst, align 1
+define i64 @zext_i8_i64(ptr %p) {
+  %v = load atomic i8, ptr %p seq_cst, align 1
   %e = zext i8 %v to i64
   ret i64 %e
 }
@@ -65,8 +65,8 @@ define i64 @zext_i8_i64(i8 *%p) {
 ; CHECK: i64.atomic.load16_u $push0=, 0($0){{$}}
 ; CHECK: i64.extend16_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @sext_i16_i64(i16 *%p) {
-  %v = load atomic i16, i16* %p seq_cst, align 2
+define i64 @sext_i16_i64(ptr %p) {
+  %v = load atomic i16, ptr %p seq_cst, align 2
   %e = sext i16 %v to i64
   ret i64 %e
 }
@@ -74,8 +74,8 @@ define i64 @sext_i16_i64(i16 *%p) {
 ; CHECK-LABEL: zext_i16_i64:
 ; CHECK: i64.atomic.load16_u $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @zext_i16_i64(i16 *%p) {
-  %v = load atomic i16, i16* %p seq_cst, align 2
+define i64 @zext_i16_i64(ptr %p) {
+  %v = load atomic i16, ptr %p seq_cst, align 2
   %e = zext i16 %v to i64
   ret i64 %e
 }
@@ -84,8 +84,8 @@ define i64 @zext_i16_i64(i16 *%p) {
 ; CHECK: i32.atomic.load $push0=, 0($0){{$}}
 ; CHECK: i64.extend_i32_s $push1=, $pop0{{$}}
 ; CHECK-NEXT: return $pop1{{$}}
-define i64 @sext_i32_i64(i32 *%p) {
-  %v = load atomic i32, i32* %p seq_cst, align 4
+define i64 @sext_i32_i64(ptr %p) {
+  %v = load atomic i32, ptr %p seq_cst, align 4
   %e = sext i32 %v to i64
   ret i64 %e
 }
@@ -93,8 +93,8 @@ define i64 @sext_i32_i64(i32 *%p) {
 ; CHECK-LABEL: zext_i32_i64:
 ; CHECK: i64.atomic.load32_u $push0=, 0($0){{$}}
 ; CHECK: return $pop0{{$}}
-define i64 @zext_i32_i64(i32 *%p) {
-  %v = load atomic i32, i32* %p seq_cst, align 4
+define i64 @zext_i32_i64(ptr %p) {
+  %v = load atomic i32, ptr %p seq_cst, align 4
   %e = zext i32 %v to i64
   ret i64 %e
 }

diff --git a/llvm/test/CodeGen/WebAssembly/load-ext.ll b/llvm/test/CodeGen/WebAssembly/load-ext.ll
index 7663b4ab701b..290616de0fff 100644
--- a/llvm/test/CodeGen/WebAssembly/load-ext.ll
+++ b/llvm/test/CodeGen/WebAssembly/load-ext.ll
@@ -6,8 +6,8 @@
 ; CHECK-LABEL: sext_i8_i32:
 ; CHECK: i32.load8_s $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @sext_i8_i32(i8 *%p) {
-  %v = load i8, i8* %p
+define i32 @sext_i8_i32(ptr %p) {
+  %v = load i8, ptr %p
   %e = sext i8 %v to i32
   ret i32 %e
 }
@@ -15,8 +15,8 @@ define i32 @sext_i8_i32(i8 *%p) {
 ; CHECK-LABEL: zext_i8_i32:
 ; CHECK: i32.load8_u $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @zext_i8_i32(i8 *%p) {
-  %v = load i8, i8* %p
+define i32 @zext_i8_i32(ptr %p) {
+  %v = load i8, ptr %p
   %e = zext i8 %v to i32
   ret i32 %e
 }
@@ -24,8 +24,8 @@ define i32 @zext_i8_i32(i8 *%p) {
 ; CHECK-LABEL: sext_i16_i32:
 ; CHECK: i32.load16_s $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @sext_i16_i32(i16 *%p) {
-  %v = load i16, i16* %p
+define i32 @sext_i16_i32(ptr %p) {
+  %v = load i16, ptr %p
   %e = sext i16 %v to i32
   ret i32 %e
 }
@@ -33,8 +33,8 @@ define i32 @sext_i16_i32(i16 *%p) {
 ; CHECK-LABEL: zext_i16_i32:
 ; CHECK: i32.load16_u $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @zext_i16_i32(i16 *%p) {
-  %v = load i16, i16* %p
+define i32 @zext_i16_i32(ptr %p) {
+  %v = load i16, ptr %p
   %e = zext i16 %v to i32
   ret i32 %e
 }
@@ -42,8 +42,8 @@ define i32 @zext_i16_i32(i16 *%p) {
 ; CHECK-LABEL: sext_i8_i64:
 ; CHECK: i64.load8_s $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @sext_i8_i64(i8 *%p) {
-  %v = load i8, i8* %p
+define i64 @sext_i8_i64(ptr %p) {
+  %v = load i8, ptr %p
   %e = sext i8 %v to i64
   ret i64 %e
 }
@@ -51,8 +51,8 @@ define i64 @sext_i8_i64(i8 *%p) {
 ; CHECK-LABEL: zext_i8_i64:
 ; CHECK: i64.load8_u $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @zext_i8_i64(i8 *%p) {
-  %v = load i8, i8* %p
+define i64 @zext_i8_i64(ptr %p) {
+  %v = load i8, ptr %p
   %e = zext i8 %v to i64
   ret i64 %e
 }
@@ -60,8 +60,8 @@ define i64 @zext_i8_i64(i8 *%p) {
 ; CHECK-LABEL: sext_i16_i64:
 ; CHECK: i64.load16_s $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @sext_i16_i64(i16 *%p) {
-  %v = load i16, i16* %p
+define i64 @sext_i16_i64(ptr %p) {
+  %v = load i16, ptr %p
   %e = sext i16 %v to i64
   ret i64 %e
 }
@@ -69,8 +69,8 @@ define i64 @sext_i16_i64(i16 *%p) {
 ; CHECK-LABEL: zext_i16_i64:
 ; CHECK: i64.load16_u $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @zext_i16_i64(i16 *%p) {
-  %v = load i16, i16* %p
+define i64 @zext_i16_i64(ptr %p) {
+  %v = load i16, ptr %p
   %e = zext i16 %v to i64
   ret i64 %e
 }
@@ -78,8 +78,8 @@ define i64 @zext_i16_i64(i16 *%p) {
 ; CHECK-LABEL: sext_i32_i64:
 ; CHECK: i64.load32_s $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @sext_i32_i64(i32 *%p) {
-  %v = load i32, i32* %p
+define i64 @sext_i32_i64(ptr %p) {
+  %v = load i32, ptr %p
   %e = sext i32 %v to i64
   ret i64 %e
 }
@@ -87,8 +87,8 @@ define i64 @sext_i32_i64(i32 *%p) {
 ; CHECK-LABEL: zext_i32_i64:
 ; CHECK: i64.load32_u $push0=, 0($0){{$}}
 ; CHECK: return $pop0{{$}}
-define i64 @zext_i32_i64(i32 *%p) {
-  %v = load i32, i32* %p
+define i64 @zext_i32_i64(ptr %p) {
+  %v = load i32, ptr %p
   %e = zext i32 %v to i64
   ret i64 %e
 }

diff --git a/llvm/test/CodeGen/WebAssembly/load-store-i1.ll b/llvm/test/CodeGen/WebAssembly/load-store-i1.ll
index 538c647dfcc7..73d3a4ab1c03 100644
--- a/llvm/test/CodeGen/WebAssembly/load-store-i1.ll
+++ b/llvm/test/CodeGen/WebAssembly/load-store-i1.ll
@@ -6,8 +6,8 @@
 ; CHECK-LABEL: load_u_i1_i32:
 ; CHECK:      i32.load8_u $push[[NUM0:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM0]]{{$}}
-define i32 @load_u_i1_i32(i1* %p) {
-  %v = load i1, i1* %p
+define i32 @load_u_i1_i32(ptr %p) {
+  %v = load i1, ptr %p
   %e = zext i1 %v to i32
   ret i32 %e
 }
@@ -19,8 +19,8 @@ define i32 @load_u_i1_i32(i1* %p) {
 ; CHECK-NEXT: i32.and $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
 ; CHECK-NEXT: i32.sub $push[[NUM4:[0-9]+]]=, $pop[[NUM3]], $pop[[NUM2]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM4]]{{$}}
-define i32 @load_s_i1_i32(i1* %p) {
-  %v = load i1, i1* %p
+define i32 @load_s_i1_i32(ptr %p) {
+  %v = load i1, ptr %p
   %e = sext i1 %v to i32
   ret i32 %e
 }
@@ -28,8 +28,8 @@ define i32 @load_s_i1_i32(i1* %p) {
 ; CHECK-LABEL: load_u_i1_i64:
 ; CHECK:      i64.load8_u $push[[NUM0:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM0]]{{$}}
-define i64 @load_u_i1_i64(i1* %p) {
-  %v = load i1, i1* %p
+define i64 @load_u_i1_i64(ptr %p) {
+  %v = load i1, ptr %p
   %e = zext i1 %v to i64
   ret i64 %e
 }
@@ -41,8 +41,8 @@ define i64 @load_u_i1_i64(i1* %p) {
 ; CHECK-NEXT: i64.and $push[[NUM2:[0-9]+]]=, $pop[[NUM0]], $pop[[NUM1]]{{$}}
 ; CHECK-NEXT: i64.sub $push[[NUM4:[0-9]+]]=, $pop[[NUM3]], $pop[[NUM2]]{{$}}
 ; CHECK-NEXT: return $pop[[NUM4]]{{$}}
-define i64 @load_s_i1_i64(i1* %p) {
-  %v = load i1, i1* %p
+define i64 @load_s_i1_i64(ptr %p) {
+  %v = load i1, ptr %p
   %e = sext i1 %v to i64
   ret i64 %e
 }
@@ -51,9 +51,9 @@ define i64 @load_s_i1_i64(i1* %p) {
 ; CHECK:      i32.const $push[[NUM0:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.and $push[[NUM1:[0-9]+]]=, $1, $pop[[NUM0]]{{$}}
 ; CHECK-NEXT: i32.store8 0($0), $pop[[NUM1]]{{$}}
-define void @store_i32_i1(i1* %p, i32 %v) {
+define void @store_i32_i1(ptr %p, i32 %v) {
   %t = trunc i32 %v to i1
-  store i1 %t, i1* %p
+  store i1 %t, ptr %p
   ret void
 }
 
@@ -61,8 +61,8 @@ define void @store_i32_i1(i1* %p, i32 %v) {
 ; CHECK:      i64.const $push[[NUM0:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.and $push[[NUM1:[0-9]+]]=, $1, $pop[[NUM0]]{{$}}
 ; CHECK-NEXT: i64.store8 0($0), $pop[[NUM1]]{{$}}
-define void @store_i64_i1(i1* %p, i64 %v) {
+define void @store_i64_i1(ptr %p, i64 %v) {
   %t = trunc i64 %v to i1
-  store i1 %t, i1* %p
+  store i1 %t, ptr %p
   ret void
 }

diff --git a/llvm/test/CodeGen/WebAssembly/load-store-pic.ll b/llvm/test/CodeGen/WebAssembly/load-store-pic.ll
index dd38184d7873..ebf98aba9995 100644
--- a/llvm/test/CodeGen/WebAssembly/load-store-pic.ll
+++ b/llvm/test/CodeGen/WebAssembly/load-store-pic.ll
@@ -26,7 +26,7 @@ define i32 @load_hidden_global() {
 ; PIC-NEXT:    i32.load $push[[L3:[0-9]+]]=, 0($pop[[L2]]){{$}}
 ; CHECK-NEXT:    end_function
 
-  %1 = load i32, i32* @hidden_global
+  %1 = load i32, ptr @hidden_global
   ret i32 %1
 }
 
@@ -40,8 +40,8 @@ define i32 @load_hidden_global_offset() {
 ; PIC-NEXT:    i32.load $push{{[0-9]+}}=, 0($pop[[L4]]){{$}}
 ; CHECK-NEXT:  end_function
 
-  %1 = getelementptr [10 x i32], [10 x i32]* @hidden_global_array, i32 0, i32 5
-  %2 = load i32, i32* %1
+  %1 = getelementptr [10 x i32], ptr @hidden_global_array, i32 0, i32 5
+  %2 = load i32, ptr %1
   ret i32 %2
 }
 
@@ -55,7 +55,7 @@ define void @store_hidden_global(i32 %n) {
 ; PIC-NEXT:    i32.store 0($pop[[L2]]), $0{{$}}
 ; CHECK-NEXT:    end_function
 
-  store i32 %n, i32* @hidden_global
+  store i32 %n, ptr @hidden_global
   ret void
 }
 
@@ -70,8 +70,8 @@ define void @store_hidden_global_offset(i32 %n) {
 
 ; CHECK-NEXT:   end_function
 
-  %1 = getelementptr [10 x i32], [10 x i32]* @hidden_global_array, i32 0, i32 5
-  store i32 %n, i32* %1
+  %1 = getelementptr [10 x i32], ptr @hidden_global_array, i32 0, i32 5
+  store i32 %n, ptr %1
   ret void
 }
 
@@ -86,7 +86,7 @@ define i32 @load_external_global() {
 
 ; CHECK-NEXT:   end_function
 
-  %1 = load i32, i32* @external_global
+  %1 = load i32, ptr @external_global
   ret i32 %1
 }
 
@@ -99,8 +99,8 @@ define i32 @load_external_global_offset() {
 
 ; CHECK-NEXT:   end_function
 
-  %1 = getelementptr [10 x i32], [10 x i32]* @external_global_array, i32 0, i32 5
-  %2 = load i32, i32* %1
+  %1 = getelementptr [10 x i32], ptr @external_global_array, i32 0, i32 5
+  %2 = load i32, ptr %1
   ret i32 %2
 }
 
@@ -113,7 +113,7 @@ define void @store_external_global(i32 %n) {
 
 ; CHECK-NEXT:   end_function
 
-  store i32 %n, i32* @external_global
+  store i32 %n, ptr @external_global
   ret void
 }
 
@@ -126,7 +126,7 @@ define void @store_external_global_offset(i32 %n) {
 
 ; CHECK-NEXT:   end_function
 
-  %1 = getelementptr [10 x i32], [10 x i32]* @external_global_array, i32 0, i32 5
-  store i32 %n, i32* %1
+  %1 = getelementptr [10 x i32], ptr @external_global_array, i32 0, i32 5
+  store i32 %n, ptr %1
   ret void
 }

diff --git a/llvm/test/CodeGen/WebAssembly/load-store-static.ll b/llvm/test/CodeGen/WebAssembly/load-store-static.ll
index fa13fa1c0d87..1701d024782b 100644
--- a/llvm/test/CodeGen/WebAssembly/load-store-static.ll
+++ b/llvm/test/CodeGen/WebAssembly/load-store-static.ll
@@ -18,7 +18,7 @@ define i32 @load_hidden_global() {
 ; NON-PIC-NEXT:    i32.load $push1=, hidden_global($pop0){{$}}
 ; CHECK-NEXT:    end_function
 
-  %1 = load i32, i32* @hidden_global
+  %1 = load i32, ptr @hidden_global
   ret i32 %1
 }
 
@@ -28,8 +28,8 @@ define i32 @load_hidden_global_offset() {
 ; NON-PIC-NEXT:i32.load  $push1=, hidden_global_array+20($pop0){{$}}
 ; CHECK-NEXT:  end_function
 
-  %1 = getelementptr [10 x i32], [10 x i32]* @hidden_global_array, i32 0, i32 5
-  %2 = load i32, i32* %1
+  %1 = getelementptr [10 x i32], ptr @hidden_global_array, i32 0, i32 5
+  %2 = load i32, ptr %1
   ret i32 %2
 }
 
@@ -41,7 +41,7 @@ define void @store_hidden_global(i32 %n) {
 ; NON-PIC-NEXT:  i32.store hidden_global($pop0), $0{{$}}
 ; CHECK-NEXT:    end_function
 
-  store i32 %n, i32* @hidden_global
+  store i32 %n, ptr @hidden_global
   ret void
 }
 
@@ -51,8 +51,8 @@ define void @store_hidden_global_offset(i32 %n) {
 ; NON-PIC-NEXT: i32.store hidden_global_array+20($pop0), $0{{$}}
 ; CHECK-NEXT:   end_function
 
-  %1 = getelementptr [10 x i32], [10 x i32]* @hidden_global_array, i32 0, i32 5
-  store i32 %n, i32* %1
+  %1 = getelementptr [10 x i32], ptr @hidden_global_array, i32 0, i32 5
+  store i32 %n, ptr %1
   ret void
 }
 
@@ -66,7 +66,7 @@ define i32 @load_external_global() {
 ; NON-PIC-NEXT: i32.load $push1=, external_global($pop0){{$}}
 ; CHECK-NEXT:   end_function
 
-  %1 = load i32, i32* @external_global
+  %1 = load i32, ptr @external_global
   ret i32 %1
 }
 
@@ -76,8 +76,8 @@ define i32 @load_external_global_offset() {
 ; NON-PIC-NEXT: i32.load $push{{[0-9]+}}=, external_global_array+20($pop[[L0]]){{$}}
 ; CHECK-NEXT:   end_function
 
-  %1 = getelementptr [10 x i32], [10 x i32]* @external_global_array, i32 0, i32 5
-  %2 = load i32, i32* %1
+  %1 = getelementptr [10 x i32], ptr @external_global_array, i32 0, i32 5
+  %2 = load i32, ptr %1
   ret i32 %2
 }
 
@@ -89,7 +89,7 @@ define void @store_external_global(i32 %n) {
 ; NON-PIC-NEXT: i32.store external_global($pop0), $0{{$}}
 ; CHECK-NEXT:   end_function
 
-  store i32 %n, i32* @external_global
+  store i32 %n, ptr @external_global
   ret void
 }
 
@@ -99,7 +99,7 @@ define void @store_external_global_offset(i32 %n) {
 ; NON-PIC-NEXT: i32.store external_global_array+20($pop0), $0{{$}}
 ; CHECK-NEXT:   end_function
 
-  %1 = getelementptr [10 x i32], [10 x i32]* @external_global_array, i32 0, i32 5
-  store i32 %n, i32* %1
+  %1 = getelementptr [10 x i32], ptr @external_global_array, i32 0, i32 5
+  store i32 %n, ptr %1
   ret void
 }

diff --git a/llvm/test/CodeGen/WebAssembly/load.ll b/llvm/test/CodeGen/WebAssembly/load.ll
index e786c329ac8b..e1412832e90a 100644
--- a/llvm/test/CodeGen/WebAssembly/load.ll
+++ b/llvm/test/CodeGen/WebAssembly/load.ll
@@ -11,8 +11,8 @@
 ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i32 @ldi32(i32 *%p) {
-  %v = load i32, i32* %p
+define i32 @ldi32(ptr %p) {
+  %v = load i32, ptr %p
   ret i32 %v
 }
 
@@ -22,8 +22,8 @@ define i32 @ldi32(i32 *%p) {
 ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i64 @ldi64(i64 *%p) {
-  %v = load i64, i64* %p
+define i64 @ldi64(ptr %p) {
+  %v = load i64, ptr %p
   ret i64 %v
 }
 
@@ -33,8 +33,8 @@ define i64 @ldi64(i64 *%p) {
 ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f32.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define float @ldf32(float *%p) {
-  %v = load float, float* %p
+define float @ldf32(ptr %p) {
+  %v = load float, ptr %p
   ret float %v
 }
 
@@ -44,7 +44,7 @@ define float @ldf32(float *%p) {
 ; CHECK-NEXT: local.get $push[[L0:[0-9]+]]=, 0{{$}}
 ; CHECK-NEXT: f64.load $push[[NUM:[0-9]+]]=, 0($pop[[L0]]){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define double @ldf64(double *%p) {
-  %v = load double, double* %p
+define double @ldf64(ptr %p) {
+  %v = load double, ptr %p
   ret double %v
 }

diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll
index 0edf2086b83f..4f33439db770 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll
@@ -8,17 +8,17 @@ target triple = "wasm32-unknown-unknown"
 
 %struct.__jmp_buf_tag = type { [6 x i32], i32, [32 x i32] }
 
-define void @exception() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @exception() personality ptr @__gxx_personality_v0 {
 entry:
   invoke {i32, i32} @foo(i32 3)
           to label %try.cont unwind label %lpad
 
 lpad:                                             ; preds = %entry
-  %1 = landingpad { i8*, i32 }
-          catch i8* null
-  %2 = extractvalue { i8*, i32 } %1, 0
-  %3 = extractvalue { i8*, i32 } %1, 1
-  %4 = call i8* @__cxa_begin_catch(i8* %2) #2
+  %1 = landingpad { ptr, i32 }
+          catch ptr null
+  %2 = extractvalue { ptr, i32 } %1, 0
+  %3 = extractvalue { ptr, i32 } %1, 1
+  %4 = call ptr @__cxa_begin_catch(ptr %2) #2
   call void @__cxa_end_catch()
   br label %try.cont
 
@@ -29,24 +29,22 @@ try.cont:                                         ; preds = %entry, %lpad
 define void @setjmp_longjmp() {
 entry:
   %buf = alloca [1 x %struct.__jmp_buf_tag], align 16
-  %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %buf, i32 0, i32 0
-  %call = call i32 @setjmp(%struct.__jmp_buf_tag* %arraydecay) #0
-  %arraydecay1 = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %buf, i32 0, i32 0
+  %call = call i32 @setjmp(ptr %buf) #0
   call {i32, i32} @foo(i32 3)
-  call void @longjmp(%struct.__jmp_buf_tag* %arraydecay1, i32 1) #1
+  call void @longjmp(ptr %buf, i32 1) #1
   unreachable
 }
 
 declare {i32, i32} @foo(i32)
 declare i32 @__gxx_personality_v0(...)
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 declare void @__cxa_end_catch()
 ; Function Attrs: returns_twice
-declare i32 @setjmp(%struct.__jmp_buf_tag*) #0
+declare i32 @setjmp(ptr) #0
 ; Function Attrs: noreturn
-declare void @longjmp(%struct.__jmp_buf_tag*, i32) #1
-declare i8* @malloc(i32)
-declare void @free(i8*)
+declare void @longjmp(ptr, i32) #1
+declare ptr @malloc(i32)
+declare void @free(ptr)
 
 attributes #0 = { returns_twice }
 attributes #1 = { noreturn }

diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
index 58a4dc852b9f..aa4d87756c87 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-options.ll
@@ -21,7 +21,7 @@ target triple = "wasm32-unknown-unknown"
 
 %struct.__jmp_buf_tag = type { [6 x i32], i32, [32 x i32] }
 
-define void @exception() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @exception() personality ptr @__gxx_personality_v0 {
 ; EH-LABEL:   type exception,@function
 ; NONE-LABEL: type exception,@function
 entry:
@@ -39,11 +39,11 @@ invoke.cont:
 ; NONE:   call bar
 
 lpad:                                             ; preds = %entry
-  %0 = landingpad { i8*, i32 }
-          catch i8* null
-  %1 = extractvalue { i8*, i32 } %0, 0
-  %2 = extractvalue { i8*, i32 } %0, 1
-  %3 = call i8* @__cxa_begin_catch(i8* %1) #2
+  %0 = landingpad { ptr, i32 }
+          catch ptr null
+  %1 = extractvalue { ptr, i32 } %0, 0
+  %2 = extractvalue { ptr, i32 } %0, 1
+  %3 = call ptr @__cxa_begin_catch(ptr %1) #2
   call void @__cxa_end_catch()
   br label %try.cont
 
@@ -56,16 +56,14 @@ define void @setjmp_longjmp() {
 ; NONE-LABEL: type setjmp_longjmp,@function
 entry:
   %buf = alloca [1 x %struct.__jmp_buf_tag], align 16
-  %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %buf, i32 0, i32 0
-  %call = call i32 @setjmp(%struct.__jmp_buf_tag* %arraydecay) #0
-  %arraydecay1 = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %buf, i32 0, i32 0
-  call void @longjmp(%struct.__jmp_buf_tag* %arraydecay1, i32 1) #1
+  %call = call i32 @setjmp(ptr %buf) #0
+  call void @longjmp(ptr %buf, i32 1) #1
   unreachable
 ; SJLJ: call saveSetjmp
 ; SJLJ: i32.const emscripten_longjmp
 ; SJLJ-NOT: i32.const emscripten_longjmp_jmpbuf
 ; SJLJ: call invoke_vii
-; SJLJ-NOT: call "__invoke_void_%struct.__jmp_buf_tag*_i32"
+; SJLJ-NOT: call "__invoke_void_ptr_i32"
 ; SJLJ: call testSetjmp
 
 ; NONE: call setjmp
@@ -87,14 +85,14 @@ entry:
 declare void @foo(i32)
 declare void @bar()
 declare i32 @__gxx_personality_v0(...)
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 declare void @__cxa_end_catch()
 ; Function Attrs: returns_twice
-declare i32 @setjmp(%struct.__jmp_buf_tag*) #0
+declare i32 @setjmp(ptr) #0
 ; Function Attrs: noreturn
-declare void @longjmp(%struct.__jmp_buf_tag*, i32) #1
-declare i8* @malloc(i32)
-declare void @free(i8*)
+declare void @longjmp(ptr, i32) #1
+declare ptr @malloc(i32)
+declare void @free(ptr)
 
 attributes #0 = { returns_twice }
 attributes #1 = { noreturn }

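The personality clause shows the same effect on constant expressions: @__gxx_personality_v0 already has type ptr, so the bitcast wrapper is dropped. A self-contained sketch of the new form (@sketch_eh and @foo are illustrative):

declare void @foo()
declare i32 @__gxx_personality_v0(...)

define void @sketch_eh() personality ptr @__gxx_personality_v0 {
entry:
  invoke void @foo() to label %cont unwind label %lpad
cont:
  ret void
lpad:
  ; Landingpad results likewise use { ptr, i32 } instead of { i8*, i32 }.
  %lp = landingpad { ptr, i32 }
          catch ptr null
  resume { ptr, i32 } %lp
}
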
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-exceptions-allowed.ll b/llvm/test/CodeGen/WebAssembly/lower-em-exceptions-allowed.ll
index 178c2aefd11b..96597a46b664 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-exceptions-allowed.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-exceptions-allowed.ll
@@ -3,7 +3,7 @@
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
-define void @dont_catch() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @dont_catch() personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: @dont_catch(
 entry:
   invoke void @foo()
@@ -16,14 +16,14 @@ invoke.cont:                                      ; preds = %entry
   br label %try.cont
 
 lpad:                                             ; preds = %entry
-  %0 = landingpad { i8*, i32 }
-          catch i8* null
-  %1 = extractvalue { i8*, i32 } %0, 0
-  %2 = extractvalue { i8*, i32 } %0, 1
+  %0 = landingpad { ptr, i32 }
+          catch ptr null
+  %1 = extractvalue { ptr, i32 } %0, 0
+  %2 = extractvalue { ptr, i32 } %0, 1
   br label %catch
 
 catch:                                            ; preds = %lpad
-  %3 = call i8* @__cxa_begin_catch(i8* %1)
+  %3 = call ptr @__cxa_begin_catch(ptr %1)
   call void @__cxa_end_catch()
   br label %try.cont
 
@@ -31,27 +31,27 @@ try.cont:                                         ; preds = %catch, %invoke.cont
   ret void
 }
 
-define void @do_catch() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @do_catch() personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: @do_catch(
 entry:
   invoke void @foo()
           to label %invoke.cont unwind label %lpad
 ; CHECK: entry:
-; CHECK-NEXT: store i32 0, i32*
-; CHECK-NEXT: call cc{{.*}} void @__invoke_void(void ()* @foo)
+; CHECK-NEXT: store i32 0, ptr
+; CHECK-NEXT: call cc{{.*}} void @__invoke_void(ptr @foo)
 
 invoke.cont:                                      ; preds = %entry
   br label %try.cont
 
 lpad:                                             ; preds = %entry
-  %0 = landingpad { i8*, i32 }
-          catch i8* null
-  %1 = extractvalue { i8*, i32 } %0, 0
-  %2 = extractvalue { i8*, i32 } %0, 1
+  %0 = landingpad { ptr, i32 }
+          catch ptr null
+  %1 = extractvalue { ptr, i32 } %0, 0
+  %2 = extractvalue { ptr, i32 } %0, 1
   br label %catch
 
 catch:                                            ; preds = %lpad
-  %3 = call i8* @__cxa_begin_catch(i8* %1)
+  %3 = call ptr @__cxa_begin_catch(ptr %1)
   call void @__cxa_end_catch()
   br label %try.cont
 
@@ -61,5 +61,5 @@ try.cont:                                         ; preds = %catch, %invoke.cont
 
 declare void @foo()
 declare i32 @__gxx_personality_v0(...)
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 declare void @__cxa_end_catch()

diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-exceptions-resume-only.ll b/llvm/test/CodeGen/WebAssembly/lower-em-exceptions-resume-only.ll
index 6996a6bd138c..fc77029cf457 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-exceptions-resume-only.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-exceptions-resume-only.ll
@@ -7,11 +7,11 @@ target triple = "wasm32-unknown-unknown"
 ; correctly and does not crash.
 ; CHECK-LABEL: @resume_only
 ; CHECK: call void @__resumeException
-define void @resume_only() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @resume_only() personality ptr @__gxx_personality_v0 {
 entry:
-  %val0 = insertvalue { i8*, i32 } undef, i8* null, 0
-  %val1 = insertvalue { i8*, i32} %val0, i32 0, 1
-  resume { i8*, i32 } %val1
+  %val0 = insertvalue { ptr, i32 } undef, ptr null, 0
+  %val1 = insertvalue { ptr, i32} %val0, i32 0, 1
+  resume { ptr, i32 } %val1
 }
 
 declare i32 @__gxx_personality_v0(...)

diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
index de020c5f2606..1a85a63e44ad 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alias.ll
@@ -7,30 +7,29 @@ target triple = "wasm32-unknown-emscripten"
 ; function that cannot longjmp.
 
 %struct.__jmp_buf_tag = type { [6 x i32], i32, [32 x i32] }
-@malloc = weak alias i8* (i32), i8* (i32)* @dlmalloc
+@malloc = weak alias ptr (i32), ptr @dlmalloc
 
 ; CHECK-LABEL: @malloc_test
 define void @malloc_test() {
 entry:
-  ; CHECK: call i8* @malloc
+  ; CHECK: call ptr @malloc
   %retval = alloca i32, align 4
   %jmp = alloca [1 x %struct.__jmp_buf_tag], align 16
-  store i32 0, i32* %retval, align 4
-  %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %jmp, i32 0, i32 0
-  %call = call i32 @setjmp(%struct.__jmp_buf_tag* %arraydecay) #0
+  store i32 0, ptr %retval, align 4
+  %call = call i32 @setjmp(ptr %jmp) #0
   call void @foo()
   ret void
 }
 
 ; This is a dummy dlmalloc implementation only to make the compiler pass, because an
 ; alias (malloc) has to point to an actual definition.
-define i8* @dlmalloc(i32) {
-  %p = inttoptr i32 0 to i8*
-  ret i8* %p
+define ptr @dlmalloc(i32) {
+  %p = inttoptr i32 0 to ptr
+  ret ptr %p
 }
 
 declare void @foo()
 ; Function Attrs: returns_twice
-declare i32 @setjmp(%struct.__jmp_buf_tag*) #0
+declare i32 @setjmp(ptr) #0
 
 attributes #0 = { returns_twice }

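Note the asymmetry in the alias conversion above: the alias keeps an explicit value type, ptr (i32), while the aliasee operand becomes a bare ptr. A standalone sketch mirroring the test (the null-returning body stands in for the test's dummy dlmalloc):

define ptr @dlmalloc(i32) {
  ret ptr null
}

; Old: @malloc = weak alias i8* (i32), i8* (i32)* @dlmalloc
@malloc = weak alias ptr (i32), ptr @dlmalloc
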
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-indirect-setjmp.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-indirect-setjmp.ll
index d488bdb6b0b3..48f35c549cb7 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-indirect-setjmp.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-indirect-setjmp.ll
@@ -7,21 +7,20 @@ target triple = "wasm32-unknown-unknown"
 %struct.__jmp_buf_tag = type { [6 x i32], i32, [32 x i32] }
 
 ; CHECK: LLVM ERROR: Indirect use of setjmp is not supported
-@setjmp_fp = global i32 (%struct.__jmp_buf_tag*)* @setjmp, align 4
+@setjmp_fp = global ptr @setjmp, align 4
 
 define void @indirect_setjmp_call() {
 entry:
   %buf = alloca [1 x %struct.__jmp_buf_tag], align 16
-  %0 = load i32 (%struct.__jmp_buf_tag*)*, i32 (%struct.__jmp_buf_tag*)** @setjmp_fp, align 4
-  %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %buf, i32 0, i32 0
-  %call = call i32 %0(%struct.__jmp_buf_tag* %arraydecay)
+  %0 = load ptr, ptr @setjmp_fp, align 4
+  %call = call i32 %0(ptr %buf)
   call void @foo()
   ret void
 }
 
 declare void @foo()
 ; Function Attrs: returns_twice
-declare i32 @setjmp(%struct.__jmp_buf_tag*) #0
+declare i32 @setjmp(ptr) #0
 
 attributes #0 = { returns_twice }
 

diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-sret.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-sret.ll
index 1ea09d89576e..027e883e2da6 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-sret.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-sret.ll
@@ -4,7 +4,7 @@ target triple = "wasm32-unknown-unknown"
 
 %struct.__jmp_buf_tag = type { [6 x i32], i32, [32 x i32] }
 
-declare i32 @setjmp(%struct.__jmp_buf_tag*) #0
+declare i32 @setjmp(ptr) #0
 declare {i32, i32} @returns_struct()
 
 ; Test the combination of backend legalization of large return types and the
@@ -12,8 +12,7 @@ declare {i32, i32} @returns_struct()
 define {i32, i32} @legalized_to_sret() {
 entry:
   %env = alloca [1 x %struct.__jmp_buf_tag], align 16
-  %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %env, i32 0, i32 0
-  %call = call i32 @setjmp(%struct.__jmp_buf_tag* %arraydecay) #0
+  %call = call i32 @setjmp(ptr %env) #0
   ; This is the function pointer to pass to invoke.
   ; It needs to be the first argument (that's what we're testing here)
   ; CHECK: i32.const $push[[FPTR:[0-9]+]]=, returns_struct

diff --git a/llvm/test/CodeGen/WebAssembly/lower-global-dtors.ll b/llvm/test/CodeGen/WebAssembly/lower-global-dtors.ll
index c366f312ed03..a7692525aa14 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-global-dtors.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-global-dtors.ll
@@ -26,28 +26,28 @@ declare void @after_the_null()
 @associatedc3 = global i8 84
 
 @llvm.global_ctors = appending global
-[1 x { i32, void ()*, i8* }]
+[1 x { i32, ptr, ptr }]
 [
-  { i32, void ()*, i8* } { i32 200, void ()* @orig_ctor, i8* null }
+  { i32, ptr, ptr } { i32 200, ptr @orig_ctor, ptr null }
 ]
 
 @llvm.global_dtors = appending global
-[14 x { i32, void ()*, i8* }]
+[14 x { i32, ptr, ptr }]
 [
-  { i32, void ()*, i8* } { i32 0, void ()* @orig_dtor0, i8* null },
-  { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1a, i8* null },
-  { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1b, i8* null },
-  { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1c0, i8* @associatedc0 },
-  { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1c1a, i8* @associatedc1 },
-  { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1c1b, i8* @associatedc1 },
-  { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1c2a, i8* @associatedc2 },
-  { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1c2b, i8* @associatedc2 },
-  { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1c3, i8* @associatedc3 },
-  { i32, void ()*, i8* } { i32 1, void ()* @orig_dtor1d, i8* null },
-  { i32, void ()*, i8* } { i32 65535, void ()* @orig_dtor65535c0, i8* @associatedc0 },
-  { i32, void ()*, i8* } { i32 65535, void ()* @orig_dtor65535, i8* null },
-  { i32, void ()*, i8* } { i32 65535, void ()* null, i8* null },
-  { i32, void ()*, i8* } { i32 65535, void ()* @after_the_null, i8* null }
+  { i32, ptr, ptr } { i32 0, ptr @orig_dtor0, ptr null },
+  { i32, ptr, ptr } { i32 1, ptr @orig_dtor1a, ptr null },
+  { i32, ptr, ptr } { i32 1, ptr @orig_dtor1b, ptr null },
+  { i32, ptr, ptr } { i32 1, ptr @orig_dtor1c0, ptr @associatedc0 },
+  { i32, ptr, ptr } { i32 1, ptr @orig_dtor1c1a, ptr @associatedc1 },
+  { i32, ptr, ptr } { i32 1, ptr @orig_dtor1c1b, ptr @associatedc1 },
+  { i32, ptr, ptr } { i32 1, ptr @orig_dtor1c2a, ptr @associatedc2 },
+  { i32, ptr, ptr } { i32 1, ptr @orig_dtor1c2b, ptr @associatedc2 },
+  { i32, ptr, ptr } { i32 1, ptr @orig_dtor1c3, ptr @associatedc3 },
+  { i32, ptr, ptr } { i32 1, ptr @orig_dtor1d, ptr null },
+  { i32, ptr, ptr } { i32 65535, ptr @orig_dtor65535c0, ptr @associatedc0 },
+  { i32, ptr, ptr } { i32 65535, ptr @orig_dtor65535, ptr null },
+  { i32, ptr, ptr } { i32 65535, ptr null, ptr null },
+  { i32, ptr, ptr } { i32 65535, ptr @after_the_null, ptr null }
 ]
 
 ; CHECK-LABEL: .functype __cxa_atexit (i32, i32, i32) -> (i32){{$}}

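The ctor/dtor tables keep their three-field layout of priority, function, and associated data; only the two pointer fields change spelling. A minimal single-entry sketch (@dtor is illustrative):

declare void @dtor()

@llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [
  { i32, ptr, ptr } { i32 65535, ptr @dtor, ptr null }
]
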
diff --git a/llvm/test/CodeGen/WebAssembly/main-three-args.ll b/llvm/test/CodeGen/WebAssembly/main-three-args.ll
index 06e7b0c399a2..142d77cc650f 100644
--- a/llvm/test/CodeGen/WebAssembly/main-three-args.ll
+++ b/llvm/test/CodeGen/WebAssembly/main-three-args.ll
@@ -5,7 +5,7 @@
 
 target triple = "wasm32-unknown-unknown"
 
-define i32 @main(i32 %a, i8** %b, i8** %c) {
+define i32 @main(i32 %a, ptr %b, ptr %c) {
   ret i32 0
 }
 

diff --git a/llvm/test/CodeGen/WebAssembly/main-with-args.ll b/llvm/test/CodeGen/WebAssembly/main-with-args.ll
index c6782c397602..f648a7a71878 100644
--- a/llvm/test/CodeGen/WebAssembly/main-with-args.ll
+++ b/llvm/test/CodeGen/WebAssembly/main-with-args.ll
@@ -4,7 +4,7 @@
 
 target triple = "wasm32-unknown-unknown"
 
-define i32 @main(i32 %a, i8** %b) {
+define i32 @main(i32 %a, ptr %b) {
   ret i32 0
 }
 

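Both main variants above reduce i8** to ptr: levels of indirection are no longer spelled in the type, so argv and envp look like any other pointer parameter. A sketch (parameter names chosen for clarity, not taken from the tests):

define i32 @main(i32 %argc, ptr %argv, ptr %envp) {
  ; %argv and %envp were both i8** under typed pointers; each is now ptr.
  ret i32 0
}
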
diff --git a/llvm/test/CodeGen/WebAssembly/mem-intrinsics.ll b/llvm/test/CodeGen/WebAssembly/mem-intrinsics.ll
index d9ceb86f208d..dccd727362d7 100644
--- a/llvm/test/CodeGen/WebAssembly/mem-intrinsics.ll
+++ b/llvm/test/CodeGen/WebAssembly/mem-intrinsics.ll
@@ -4,57 +4,57 @@
 
 target triple = "wasm32-unknown-unknown"
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1)
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1)
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1)
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1)
 
 ; Test that return values are optimized.
 
 ; CHECK-LABEL: copy_yes:
 ; CHECK:      call     $push0=, memcpy, $0, $1, $2{{$}}
 ; CHECK-NEXT: return   $pop0{{$}}
-define i8* @copy_yes(i8* %dst, i8* %src, i32 %len) {
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false)
-  ret i8* %dst
+define ptr @copy_yes(ptr %dst, ptr %src, i32 %len) {
+  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 %len, i1 false)
+  ret ptr %dst
 }
 
 ; CHECK-LABEL: copy_no:
 ; CHECK:      call     $drop=, memcpy, $0, $1, $2{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @copy_no(i8* %dst, i8* %src, i32 %len) {
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false)
+define void @copy_no(ptr %dst, ptr %src, i32 %len) {
+  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 %len, i1 false)
   ret void
 }
 
 ; CHECK-LABEL: move_yes:
 ; CHECK:      call     $push0=, memmove, $0, $1, $2{{$}}
 ; CHECK-NEXT: return   $pop0{{$}}
-define i8* @move_yes(i8* %dst, i8* %src, i32 %len) {
-  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false)
-  ret i8* %dst
+define ptr @move_yes(ptr %dst, ptr %src, i32 %len) {
+  call void @llvm.memmove.p0.p0.i32(ptr %dst, ptr %src, i32 %len, i1 false)
+  ret ptr %dst
 }
 
 ; CHECK-LABEL: move_no:
 ; CHECK:      call     $drop=, memmove, $0, $1, $2{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @move_no(i8* %dst, i8* %src, i32 %len) {
-  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 %len, i1 false)
+define void @move_no(ptr %dst, ptr %src, i32 %len) {
+  call void @llvm.memmove.p0.p0.i32(ptr %dst, ptr %src, i32 %len, i1 false)
   ret void
 }
 
 ; CHECK-LABEL: set_yes:
 ; CHECK:      call     $push0=, memset, $0, $1, $2{{$}}
 ; CHECK-NEXT: return   $pop0{{$}}
-define i8* @set_yes(i8* %dst, i8 %src, i32 %len) {
-  call void @llvm.memset.p0i8.i32(i8* %dst, i8 %src, i32 %len, i1 false)
-  ret i8* %dst
+define ptr @set_yes(ptr %dst, i8 %src, i32 %len) {
+  call void @llvm.memset.p0.i32(ptr %dst, i8 %src, i32 %len, i1 false)
+  ret ptr %dst
 }
 
 ; CHECK-LABEL: set_no:
 ; CHECK:      call     $drop=, memset, $0, $1, $2{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @set_no(i8* %dst, i8 %src, i32 %len) {
-  call void @llvm.memset.p0i8.i32(i8* %dst, i8 %src, i32 %len, i1 false)
+define void @set_no(ptr %dst, i8 %src, i32 %len) {
+  call void @llvm.memset.p0.i32(ptr %dst, i8 %src, i32 %len, i1 false)
   ret void
 }
 
@@ -67,10 +67,8 @@ define void @frame_index() {
 entry:
   %a = alloca [2048 x i8], align 16
   %b = alloca [2048 x i8], align 16
-  %0 = getelementptr inbounds [2048 x i8], [2048 x i8]* %a, i32 0, i32 0
-  %1 = getelementptr inbounds [2048 x i8], [2048 x i8]* %b, i32 0, i32 0
-  call void @llvm.memset.p0i8.i32(i8* align 16 %0, i8 256, i32 1024, i1 false)
-  call void @llvm.memset.p0i8.i32(i8* align 16 %1, i8 256, i32 1024, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 16 %a, i8 256, i32 1024, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 16 %b, i8 256, i32 1024, i1 false)
   ret void
 }
 
@@ -80,9 +78,9 @@ entry:
 
 ; CHECK-LABEL: drop_result:
 ; CHECK: call $drop=, memset, $0, $1, $2
-declare i8* @def()
+declare ptr @def()
 declare void @block_tail_dup()
-define i8* @drop_result(i8* %arg, i8 %arg1, i32 %arg2, i32 %arg3, i32 %arg4) {
+define ptr @drop_result(ptr %arg, i8 %arg1, i32 %arg2, i32 %arg3, i32 %arg4) {
 bb:
   %tmp = icmp eq i32 %arg3, 0
   br i1 %tmp, label %bb5, label %bb9
@@ -92,20 +90,20 @@ bb5:
   br i1 %tmp6, label %bb7, label %bb8
 
 bb7:
-  call void @llvm.memset.p0i8.i32(i8* %arg, i8 %arg1, i32 %arg2, i1 false)
+  call void @llvm.memset.p0.i32(ptr %arg, i8 %arg1, i32 %arg2, i1 false)
   br label %bb11
 
 bb8:
   br label %bb11
 
 bb9:
-  %tmp10 = call i8* @def()
+  %tmp10 = call ptr @def()
   br label %bb11
 
 bb11:
-  %tmp12 = phi i8* [ %arg, %bb7 ], [ %arg, %bb8 ], [ %tmp10, %bb9 ]
+  %tmp12 = phi ptr [ %arg, %bb7 ], [ %arg, %bb8 ], [ %tmp10, %bb9 ]
   call void @block_tail_dup()
-  ret i8* %tmp12
+  ret ptr %tmp12
 }
 
 ; This is the same as drop_result, except we let tail dup happen, so the
@@ -113,7 +111,7 @@ bb11:
 
 ; CHECK-LABEL: tail_dup_to_reuse_result:
 ; CHECK: call $push{{[0-9]+}}=, memset, $0, $1, $2
-define i8* @tail_dup_to_reuse_result(i8* %arg, i8 %arg1, i32 %arg2, i32 %arg3, i32 %arg4) {
+define ptr @tail_dup_to_reuse_result(ptr %arg, i8 %arg1, i32 %arg2, i32 %arg3, i32 %arg4) {
 bb:
   %tmp = icmp eq i32 %arg3, 0
   br i1 %tmp, label %bb5, label %bb9
@@ -123,17 +121,17 @@ bb5:
   br i1 %tmp6, label %bb7, label %bb8
 
 bb7:
-  call void @llvm.memset.p0i8.i32(i8* %arg, i8 %arg1, i32 %arg2, i1 false)
+  call void @llvm.memset.p0.i32(ptr %arg, i8 %arg1, i32 %arg2, i1 false)
   br label %bb11
 
 bb8:
   br label %bb11
 
 bb9:
-  %tmp10 = call i8* @def()
+  %tmp10 = call ptr @def()
   br label %bb11
 
 bb11:
-  %tmp12 = phi i8* [ %arg, %bb7 ], [ %arg, %bb8 ], [ %tmp10, %bb9 ]
-  ret i8* %tmp12
+  %tmp12 = phi ptr [ %arg, %bb7 ], [ %arg, %bb8 ], [ %tmp10, %bb9 ]
+  ret ptr %tmp12
 }

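The mem-intrinsics hunks also show the intrinsic-name remangling that accompanies the conversion: overloaded pointer parameters are mangled as p0 (address space only) rather than p0i8, so llvm.memcpy.p0i8.p0i8.i32 becomes llvm.memcpy.p0.p0.i32. A minimal sketch reusing the declaration shape from the test above (@sketch_copy is illustrative):

declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1)

define ptr @sketch_copy(ptr %dst, ptr %src, i32 %len) {
  ; Same call as copy_yes above, under the remangled intrinsic name.
  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 %len, i1 false)
  ret ptr %dst
}
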
diff --git a/llvm/test/CodeGen/WebAssembly/multivalue.ll b/llvm/test/CodeGen/WebAssembly/multivalue.ll
index 0080052e8f7a..675009c8f3e5 100644
--- a/llvm/test/CodeGen/WebAssembly/multivalue.ll
+++ b/llvm/test/CodeGen/WebAssembly/multivalue.ll
@@ -61,7 +61,7 @@ define %pair @pair_call_return() {
 ; REF:        call_indirect __indirect_function_table, () -> (i32, i64){{$}}
 ; CHECK-NEXT: end_function{{$}}
 ; REGS: call_indirect $push{{[0-9]+}}=, $push{{[0-9]+}}=, $0{{$}}
-define %pair @pair_call_indirect(%pair()* %f) {
+define %pair @pair_call_indirect(ptr %f) {
   %p = call %pair %f()
   ret %pair %p
 }
@@ -242,7 +242,7 @@ define %rpair @pair_pass_through_swap(%pair %p) {
 ; CHECK-NEXT: .LBB{{[0-9]+}}_2:
 ; CHECK-NEXT: end_loop{{$}}
 ; CHECK-NEXT: end_function{{$}}
-define %pair @minimal_loop(i32* %p) {
+define %pair @minimal_loop(ptr %p) {
 entry:
   br label %loop
 loop:

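In the multivalue test, the indirect-call hunk shows where function types now live: the callee operand is an untyped ptr, and the signature is recovered from the call instruction itself. A standalone sketch (assuming %pair = { i32, i64 }, matching the () -> (i32, i64) REF line above):

%pair = type { i32, i64 }

define %pair @sketch_call_indirect(ptr %f) {
  ; The signature comes from the call, not from the pointer's type.
  %p = call %pair %f()
  ret %pair %p
}
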
diff --git a/llvm/test/CodeGen/WebAssembly/negative-base-reg.ll b/llvm/test/CodeGen/WebAssembly/negative-base-reg.ll
index 9ba25b3e4cff..965e4a57b249 100644
--- a/llvm/test/CodeGen/WebAssembly/negative-base-reg.ll
+++ b/llvm/test/CodeGen/WebAssembly/negative-base-reg.ll
@@ -20,8 +20,8 @@ for.body:                                         ; preds = %for.body, %entry
 ; CHECK: i32.const $push{{[0-9]+}}=, args+128
 ; CHECK: i32.add   $push[[L1:[0-9]+]]=,
 ; CHECK: i32.store 0($pop[[L1]])
-  %arrayidx = getelementptr inbounds [32 x i32], [32 x i32]* @args, i32 0, i32 %i.04
-  store i32 1, i32* %arrayidx, align 4, !tbaa !1
+  %arrayidx = getelementptr inbounds [32 x i32], ptr @args, i32 0, i32 %i.04
+  store i32 1, ptr %arrayidx, align 4, !tbaa !1
   %inc = add nuw nsw i32 %i.04, 1
   %exitcond = icmp eq i32 %inc, 32
   br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !5

diff --git a/llvm/test/CodeGen/WebAssembly/null-streamer.ll b/llvm/test/CodeGen/WebAssembly/null-streamer.ll
index d0fbce149d72..e8396e3ad456 100644
--- a/llvm/test/CodeGen/WebAssembly/null-streamer.ll
+++ b/llvm/test/CodeGen/WebAssembly/null-streamer.ll
@@ -3,12 +3,12 @@
 
 target triple = "wasm32-unknown-unknown"
 
-declare void @llvm.wasm.throw(i32, i8*)
+declare void @llvm.wasm.throw(i32, ptr)
 declare void @g()
 
-define i32 @test(i8* %p)  {
+define i32 @test(ptr %p)  {
   %n = alloca i32
-  call void @llvm.wasm.throw(i32 0, i8* %p)
+  call void @llvm.wasm.throw(i32 0, ptr %p)
   call void @g()
   ret i32 0
 }

diff --git a/llvm/test/CodeGen/WebAssembly/offset-atomics.ll b/llvm/test/CodeGen/WebAssembly/offset-atomics.ll
index c8fff288ce62..466b82e982c3 100644
--- a/llvm/test/CodeGen/WebAssembly/offset-atomics.ll
+++ b/llvm/test/CodeGen/WebAssembly/offset-atomics.ll
@@ -14,8 +14,8 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-LABEL: load_i32_no_offset:
 ; CHECK: i32.atomic.load $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @load_i32_no_offset(i32 *%p) {
-  %v = load atomic i32, i32* %p seq_cst, align 4
+define i32 @load_i32_no_offset(ptr %p) {
+  %v = load atomic i32, ptr %p seq_cst, align 4
   ret i32 %v
 }
 
@@ -23,11 +23,11 @@ define i32 @load_i32_no_offset(i32 *%p) {
 
 ; CHECK-LABEL: load_i32_with_folded_offset:
 ; CHECK: i32.atomic.load $push0=, 24($0){{$}}
-define i32 @load_i32_with_folded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define i32 @load_i32_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = load atomic i32, i32* %s seq_cst, align 4
+  %s = inttoptr i32 %r to ptr
+  %t = load atomic i32, ptr %s seq_cst, align 4
   ret i32 %t
 }
 
@@ -35,9 +35,9 @@ define i32 @load_i32_with_folded_offset(i32* %p) {
 
 ; CHECK-LABEL: load_i32_with_folded_gep_offset:
 ; CHECK: i32.atomic.load $push0=, 24($0){{$}}
-define i32 @load_i32_with_folded_gep_offset(i32* %p) {
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  %t = load atomic i32, i32* %s seq_cst, align 4
+define i32 @load_i32_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  %t = load atomic i32, ptr %s seq_cst, align 4
   ret i32 %t
 }
 
@@ -47,9 +47,9 @@ define i32 @load_i32_with_folded_gep_offset(i32* %p) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
-define i32 @load_i32_with_unfolded_gep_negative_offset(i32* %p) {
-  %s = getelementptr inbounds i32, i32* %p, i32 -6
-  %t = load atomic i32, i32* %s seq_cst, align 4
+define i32 @load_i32_with_unfolded_gep_negative_offset(ptr %p) {
+  %s = getelementptr inbounds i32, ptr %p, i32 -6
+  %t = load atomic i32, ptr %s seq_cst, align 4
   ret i32 %t
 }
 
@@ -59,11 +59,11 @@ define i32 @load_i32_with_unfolded_gep_negative_offset(i32* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
-define i32 @load_i32_with_unfolded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define i32 @load_i32_with_unfolded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = load atomic i32, i32* %s seq_cst, align 4
+  %s = inttoptr i32 %r to ptr
+  %t = load atomic i32, ptr %s seq_cst, align 4
   ret i32 %t
 }
 
@@ -73,9 +73,9 @@ define i32 @load_i32_with_unfolded_offset(i32* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
-define i32 @load_i32_with_unfolded_gep_offset(i32* %p) {
-  %s = getelementptr i32, i32* %p, i32 6
-  %t = load atomic i32, i32* %s seq_cst, align 4
+define i32 @load_i32_with_unfolded_gep_offset(ptr %p) {
+  %s = getelementptr i32, ptr %p, i32 6
+  %t = load atomic i32, ptr %s seq_cst, align 4
   ret i32 %t
 }
 
@@ -85,8 +85,8 @@ define i32 @load_i32_with_unfolded_gep_offset(i32* %p) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.atomic.load $push1=, 42($pop0){{$}}
 define i32 @load_i32_from_numeric_address() {
-  %s = inttoptr i32 42 to i32*
-  %t = load atomic i32, i32* %s seq_cst, align 4
+  %s = inttoptr i32 42 to ptr
+  %t = load atomic i32, ptr %s seq_cst, align 4
   ret i32 %t
 }
 
@@ -95,7 +95,7 @@ define i32 @load_i32_from_numeric_address() {
 ; CHECK: i32.atomic.load $push1=, gv($pop0){{$}}
 @gv = global i32 0
 define i32 @load_i32_from_global_address() {
-  %t = load atomic i32, i32* @gv seq_cst, align 4
+  %t = load atomic i32, ptr @gv seq_cst, align 4
   ret i32 %t
 }
 
@@ -108,8 +108,8 @@ define i32 @load_i32_from_global_address() {
 ; CHECK-LABEL: load_i64_no_offset:
 ; CHECK: i64.atomic.load $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @load_i64_no_offset(i64 *%p) {
-  %v = load atomic i64, i64* %p seq_cst, align 8
+define i64 @load_i64_no_offset(ptr %p) {
+  %v = load atomic i64, ptr %p seq_cst, align 8
   ret i64 %v
 }
 
@@ -117,11 +117,11 @@ define i64 @load_i64_no_offset(i64 *%p) {
 
 ; CHECK-LABEL: load_i64_with_folded_offset:
 ; CHECK: i64.atomic.load $push0=, 24($0){{$}}
-define i64 @load_i64_with_folded_offset(i64* %p) {
-  %q = ptrtoint i64* %p to i32
+define i64 @load_i64_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %t = load atomic i64, i64* %s seq_cst, align 8
+  %s = inttoptr i32 %r to ptr
+  %t = load atomic i64, ptr %s seq_cst, align 8
   ret i64 %t
 }
 
@@ -129,9 +129,9 @@ define i64 @load_i64_with_folded_offset(i64* %p) {
 
 ; CHECK-LABEL: load_i64_with_folded_gep_offset:
 ; CHECK: i64.atomic.load $push0=, 24($0){{$}}
-define i64 @load_i64_with_folded_gep_offset(i64* %p) {
-  %s = getelementptr inbounds i64, i64* %p, i32 3
-  %t = load atomic i64, i64* %s seq_cst, align 8
+define i64 @load_i64_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i64, ptr %p, i32 3
+  %t = load atomic i64, ptr %s seq_cst, align 8
   ret i64 %t
 }
 
@@ -141,9 +141,9 @@ define i64 @load_i64_with_folded_gep_offset(i64* %p) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
-define i64 @load_i64_with_unfolded_gep_negative_offset(i64* %p) {
-  %s = getelementptr inbounds i64, i64* %p, i32 -3
-  %t = load atomic i64, i64* %s seq_cst, align 8
+define i64 @load_i64_with_unfolded_gep_negative_offset(ptr %p) {
+  %s = getelementptr inbounds i64, ptr %p, i32 -3
+  %t = load atomic i64, ptr %s seq_cst, align 8
   ret i64 %t
 }
 
@@ -153,11 +153,11 @@ define i64 @load_i64_with_unfolded_gep_negative_offset(i64* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
-define i64 @load_i64_with_unfolded_offset(i64* %p) {
-  %q = ptrtoint i64* %p to i32
+define i64 @load_i64_with_unfolded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %t = load atomic i64, i64* %s seq_cst, align 8
+  %s = inttoptr i32 %r to ptr
+  %t = load atomic i64, ptr %s seq_cst, align 8
   ret i64 %t
 }
 
@@ -167,9 +167,9 @@ define i64 @load_i64_with_unfolded_offset(i64* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
-define i64 @load_i64_with_unfolded_gep_offset(i64* %p) {
-  %s = getelementptr i64, i64* %p, i32 3
-  %t = load atomic i64, i64* %s seq_cst, align 8
+define i64 @load_i64_with_unfolded_gep_offset(ptr %p) {
+  %s = getelementptr i64, ptr %p, i32 3
+  %t = load atomic i64, ptr %s seq_cst, align 8
   ret i64 %t
 }
 
@@ -183,8 +183,8 @@ define i64 @load_i64_with_unfolded_gep_offset(i64* %p) {
 ; CHECK-NEXT: .functype store_i32_no_offset (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @store_i32_no_offset(i32 *%p, i32 %v) {
-  store atomic i32 %v, i32* %p seq_cst, align 4
+define void @store_i32_no_offset(ptr %p, i32 %v) {
+  store atomic i32 %v, ptr %p seq_cst, align 4
   ret void
 }
 
@@ -192,11 +192,11 @@ define void @store_i32_no_offset(i32 *%p, i32 %v) {
 
 ; CHECK-LABEL: store_i32_with_folded_offset:
 ; CHECK: i32.atomic.store 24($0), $pop0{{$}}
-define void @store_i32_with_folded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define void @store_i32_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  store atomic i32 0, i32* %s seq_cst, align 4
+  %s = inttoptr i32 %r to ptr
+  store atomic i32 0, ptr %s seq_cst, align 4
   ret void
 }
 
@@ -204,9 +204,9 @@ define void @store_i32_with_folded_offset(i32* %p) {
 
 ; CHECK-LABEL: store_i32_with_folded_gep_offset:
 ; CHECK: i32.atomic.store 24($0), $pop0{{$}}
-define void @store_i32_with_folded_gep_offset(i32* %p) {
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  store atomic i32 0, i32* %s seq_cst, align 4
+define void @store_i32_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  store atomic i32 0, ptr %s seq_cst, align 4
   ret void
 }
 
@@ -216,9 +216,9 @@ define void @store_i32_with_folded_gep_offset(i32* %p) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
-define void @store_i32_with_unfolded_gep_negative_offset(i32* %p) {
-  %s = getelementptr inbounds i32, i32* %p, i32 -6
-  store atomic i32 0, i32* %s seq_cst, align 4
+define void @store_i32_with_unfolded_gep_negative_offset(ptr %p) {
+  %s = getelementptr inbounds i32, ptr %p, i32 -6
+  store atomic i32 0, ptr %s seq_cst, align 4
   ret void
 }
 
@@ -228,11 +228,11 @@ define void @store_i32_with_unfolded_gep_negative_offset(i32* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
-define void @store_i32_with_unfolded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define void @store_i32_with_unfolded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  store atomic i32 0, i32* %s seq_cst, align 4
+  %s = inttoptr i32 %r to ptr
+  store atomic i32 0, ptr %s seq_cst, align 4
   ret void
 }
 
@@ -242,9 +242,9 @@ define void @store_i32_with_unfolded_offset(i32* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
-define void @store_i32_with_unfolded_gep_offset(i32* %p) {
-  %s = getelementptr i32, i32* %p, i32 6
-  store atomic i32 0, i32* %s seq_cst, align 4
+define void @store_i32_with_unfolded_gep_offset(ptr %p) {
+  %s = getelementptr i32, ptr %p, i32 6
+  store atomic i32 0, ptr %s seq_cst, align 4
   ret void
 }
 
@@ -255,8 +255,8 @@ define void @store_i32_with_unfolded_gep_offset(i32* %p) {
 ; CHECK-NEXT: i32.const $push1=, 0{{$}}
 ; CHECK-NEXT: i32.atomic.store 42($pop0), $pop1{{$}}
 define void @store_i32_to_numeric_address() {
-  %s = inttoptr i32 42 to i32*
-  store atomic i32 0, i32* %s seq_cst, align 4
+  %s = inttoptr i32 42 to ptr
+  store atomic i32 0, ptr %s seq_cst, align 4
   ret void
 }
 
@@ -265,7 +265,7 @@ define void @store_i32_to_numeric_address() {
 ; CHECK: i32.const $push1=, 0{{$}}
 ; CHECK: i32.atomic.store gv($pop0), $pop1{{$}}
 define void @store_i32_to_global_address() {
-  store atomic i32 0, i32* @gv seq_cst, align 4
+  store atomic i32 0, ptr @gv seq_cst, align 4
   ret void
 }
 
@@ -279,8 +279,8 @@ define void @store_i32_to_global_address() {
 ; CHECK-NEXT: .functype store_i64_no_offset (i32, i64) -> (){{$}}
 ; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @store_i64_no_offset(i64 *%p, i64 %v) {
-  store atomic i64 %v, i64* %p seq_cst, align 8
+define void @store_i64_no_offset(ptr %p, i64 %v) {
+  store atomic i64 %v, ptr %p seq_cst, align 8
   ret void
 }
 
@@ -288,11 +288,11 @@ define void @store_i64_no_offset(i64 *%p, i64 %v) {
 
 ; CHECK-LABEL: store_i64_with_folded_offset:
 ; CHECK: i64.atomic.store 24($0), $pop0{{$}}
-define void @store_i64_with_folded_offset(i64* %p) {
-  %q = ptrtoint i64* %p to i32
+define void @store_i64_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  store atomic i64 0, i64* %s seq_cst, align 8
+  %s = inttoptr i32 %r to ptr
+  store atomic i64 0, ptr %s seq_cst, align 8
   ret void
 }
 
@@ -300,9 +300,9 @@ define void @store_i64_with_folded_offset(i64* %p) {
 
 ; CHECK-LABEL: store_i64_with_folded_gep_offset:
 ; CHECK: i64.atomic.store 24($0), $pop0{{$}}
-define void @store_i64_with_folded_gep_offset(i64* %p) {
-  %s = getelementptr inbounds i64, i64* %p, i32 3
-  store atomic i64 0, i64* %s seq_cst, align 8
+define void @store_i64_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i64, ptr %p, i32 3
+  store atomic i64 0, ptr %s seq_cst, align 8
   ret void
 }
 
@@ -312,9 +312,9 @@ define void @store_i64_with_folded_gep_offset(i64* %p) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
-define void @store_i64_with_unfolded_gep_negative_offset(i64* %p) {
-  %s = getelementptr inbounds i64, i64* %p, i32 -3
-  store atomic i64 0, i64* %s seq_cst, align 8
+define void @store_i64_with_unfolded_gep_negative_offset(ptr %p) {
+  %s = getelementptr inbounds i64, ptr %p, i32 -3
+  store atomic i64 0, ptr %s seq_cst, align 8
   ret void
 }
 
@@ -324,11 +324,11 @@ define void @store_i64_with_unfolded_gep_negative_offset(i64* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
-define void @store_i64_with_unfolded_offset(i64* %p) {
-  %q = ptrtoint i64* %p to i32
+define void @store_i64_with_unfolded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  store atomic i64 0, i64* %s seq_cst, align 8
+  %s = inttoptr i32 %r to ptr
+  store atomic i64 0, ptr %s seq_cst, align 8
   ret void
 }
 
@@ -338,9 +338,9 @@ define void @store_i64_with_unfolded_offset(i64* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
-define void @store_i64_with_unfolded_gep_offset(i64* %p) {
-  %s = getelementptr i64, i64* %p, i32 3
-  store atomic i64 0, i64* %s seq_cst, align 8
+define void @store_i64_with_unfolded_gep_offset(ptr %p) {
+  %s = getelementptr i64, ptr %p, i32 3
+  store atomic i64 0, ptr %s seq_cst, align 8
   ret void
 }
 
@@ -353,11 +353,11 @@ define void @store_i64_with_unfolded_gep_offset(i64* %p) {
 ; CHECK-LABEL: load_i8_i32_s_with_folded_offset:
 ; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0
-define i32 @load_i8_i32_s_with_folded_offset(i8* %p) {
-  %q = ptrtoint i8* %p to i32
+define i32 @load_i8_i32_s_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
-  %t = load atomic i8, i8* %s seq_cst, align 1
+  %s = inttoptr i32 %r to ptr
+  %t = load atomic i8, ptr %s seq_cst, align 1
   %u = sext i8 %t to i32
   ret i32 %u
 }
@@ -366,11 +366,11 @@ define i32 @load_i8_i32_s_with_folded_offset(i8* %p) {
 ; CHECK-LABEL: load_i32_i64_s_with_folded_offset:
 ; CHECK: i32.atomic.load $push0=, 24($0){{$}}
 ; CHECK-NEXT: i64.extend_i32_s $push1=, $pop0{{$}}
-define i64 @load_i32_i64_s_with_folded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define i64 @load_i32_i64_s_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = load atomic i32, i32* %s seq_cst, align 4
+  %s = inttoptr i32 %r to ptr
+  %t = load atomic i32, ptr %s seq_cst, align 4
   %u = sext i32 %t to i64
   ret i64 %u
 }
@@ -380,9 +380,9 @@ define i64 @load_i32_i64_s_with_folded_offset(i32* %p) {
 ; CHECK-LABEL: load_i8_i32_s_with_folded_gep_offset:
 ; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0
-define i32 @load_i8_i32_s_with_folded_gep_offset(i8* %p) {
-  %s = getelementptr inbounds i8, i8* %p, i32 24
-  %t = load atomic i8, i8* %s seq_cst, align 1
+define i32 @load_i8_i32_s_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i8, ptr %p, i32 24
+  %t = load atomic i8, ptr %s seq_cst, align 1
   %u = sext i8 %t to i32
   ret i32 %u
 }
@@ -390,9 +390,9 @@ define i32 @load_i8_i32_s_with_folded_gep_offset(i8* %p) {
 ; CHECK-LABEL: load_i16_i32_s_with_folded_gep_offset:
 ; CHECK: i32.atomic.load16_u $push0=, 48($0){{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0
-define i32 @load_i16_i32_s_with_folded_gep_offset(i16* %p) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
-  %t = load atomic i16, i16* %s seq_cst, align 2
+define i32 @load_i16_i32_s_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
+  %t = load atomic i16, ptr %s seq_cst, align 2
   %u = sext i16 %t to i32
   ret i32 %u
 }
@@ -400,9 +400,9 @@ define i32 @load_i16_i32_s_with_folded_gep_offset(i16* %p) {
 ; CHECK-LABEL: load_i16_i64_s_with_folded_gep_offset:
 ; CHECK: i64.atomic.load16_u $push0=, 48($0){{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0
-define i64 @load_i16_i64_s_with_folded_gep_offset(i16* %p) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
-  %t = load atomic i16, i16* %s seq_cst, align 2
+define i64 @load_i16_i64_s_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
+  %t = load atomic i16, ptr %s seq_cst, align 2
   %u = sext i16 %t to i64
   ret i64 %u
 }
@@ -415,9 +415,9 @@ define i64 @load_i16_i64_s_with_folded_gep_offset(i16* %p) {
 ; CHECK-NEXT: i32.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
 define i32 @load_i8_i32_s_with_folded_or_offset(i32 %x) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
-  %t1 = load atomic i8, i8* %arrayidx seq_cst, align 1
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
+  %t1 = load atomic i8, ptr %arrayidx seq_cst, align 1
   %conv = sext i8 %t1 to i32
   ret i32 %conv
 }
@@ -427,9 +427,9 @@ define i32 @load_i8_i32_s_with_folded_or_offset(i32 %x) {
 ; CHECK-NEXT: i64.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
 define i64 @load_i8_i64_s_with_folded_or_offset(i32 %x) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
-  %t1 = load atomic i8, i8* %arrayidx seq_cst, align 1
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
+  %t1 = load atomic i8, ptr %arrayidx seq_cst, align 1
   %conv = sext i8 %t1 to i64
   ret i64 %conv
 }
@@ -441,8 +441,8 @@ define i64 @load_i8_i64_s_with_folded_or_offset(i32 %x) {
 ; CHECK: i32.atomic.load16_u $push1=, 42($pop0){{$}}
 ; CHECK-NEXT: i32.extend16_s $push2=, $pop1
 define i32 @load_i16_i32_s_from_numeric_address() {
-  %s = inttoptr i32 42 to i16*
-  %t = load atomic i16, i16* %s seq_cst, align 2
+  %s = inttoptr i32 42 to ptr
+  %t = load atomic i16, ptr %s seq_cst, align 2
   %u = sext i16 %t to i32
   ret i32 %u
 }
@@ -453,7 +453,7 @@ define i32 @load_i16_i32_s_from_numeric_address() {
 ; CHECK-NEXT: i32.extend8_s $push2=, $pop1{{$}}
 @gv8 = global i8 0
 define i32 @load_i8_i32_s_from_global_address() {
-  %t = load atomic i8, i8* @gv8 seq_cst, align 1
+  %t = load atomic i8, ptr @gv8 seq_cst, align 1
   %u = sext i8 %t to i32
   ret i32 %u
 }
@@ -466,22 +466,22 @@ define i32 @load_i8_i32_s_from_global_address() {
 
 ; CHECK-LABEL: load_i8_i32_z_with_folded_offset:
 ; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
-define i32 @load_i8_i32_z_with_folded_offset(i8* %p) {
-  %q = ptrtoint i8* %p to i32
+define i32 @load_i8_i32_z_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
-  %t = load atomic i8, i8* %s seq_cst, align 1
+  %s = inttoptr i32 %r to ptr
+  %t = load atomic i8, ptr %s seq_cst, align 1
   %u = zext i8 %t to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: load_i32_i64_z_with_folded_offset:
 ; CHECK: i64.atomic.load32_u $push0=, 24($0){{$}}
-define i64 @load_i32_i64_z_with_folded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define i64 @load_i32_i64_z_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = load atomic i32, i32* %s seq_cst, align 4
+  %s = inttoptr i32 %r to ptr
+  %t = load atomic i32, ptr %s seq_cst, align 4
   %u = zext i32 %t to i64
   ret i64 %u
 }
@@ -490,27 +490,27 @@ define i64 @load_i32_i64_z_with_folded_offset(i32* %p) {
 
 ; CHECK-LABEL: load_i8_i32_z_with_folded_gep_offset:
 ; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
-define i32 @load_i8_i32_z_with_folded_gep_offset(i8* %p) {
-  %s = getelementptr inbounds i8, i8* %p, i32 24
-  %t = load atomic i8, i8* %s seq_cst, align 1
+define i32 @load_i8_i32_z_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i8, ptr %p, i32 24
+  %t = load atomic i8, ptr %s seq_cst, align 1
   %u = zext i8 %t to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: load_i16_i32_z_with_folded_gep_offset:
 ; CHECK: i32.atomic.load16_u $push0=, 48($0){{$}}
-define i32 @load_i16_i32_z_with_folded_gep_offset(i16* %p) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
-  %t = load atomic i16, i16* %s seq_cst, align 2
+define i32 @load_i16_i32_z_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
+  %t = load atomic i16, ptr %s seq_cst, align 2
   %u = zext i16 %t to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: load_i16_i64_z_with_folded_gep_offset:
 ; CHECK: i64.atomic.load16_u $push0=, 48($0){{$}}
-define i64 @load_i16_i64_z_with_folded_gep_offset(i16* %p) {
-  %s = getelementptr inbounds i16, i16* %p, i64 24
-  %t = load atomic i16, i16* %s seq_cst, align 2
+define i64 @load_i16_i64_z_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i16, ptr %p, i64 24
+  %t = load atomic i16, ptr %s seq_cst, align 2
   %u = zext i16 %t to i64
   ret i64 %u
 }
@@ -522,9 +522,9 @@ define i64 @load_i16_i64_z_with_folded_gep_offset(i16* %p) {
 ; CHECK: i32.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
 define i32 @load_i8_i32_z_with_folded_or_offset(i32 %x) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
-  %t1 = load atomic i8, i8* %arrayidx seq_cst, align 1
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
+  %t1 = load atomic i8, ptr %arrayidx seq_cst, align 1
   %conv = zext i8 %t1 to i32
   ret i32 %conv
 }
@@ -533,9 +533,9 @@ define i32 @load_i8_i32_z_with_folded_or_offset(i32 %x) {
 ; CHECK: i64.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
 define i64 @load_i8_i64_z_with_folded_or_offset(i32 %x) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
-  %t1 = load atomic i8, i8* %arrayidx seq_cst, align 1
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
+  %t1 = load atomic i8, ptr %arrayidx seq_cst, align 1
   %conv = zext i8 %t1 to i64
   ret i64 %conv
 }
@@ -546,8 +546,8 @@ define i64 @load_i8_i64_z_with_folded_or_offset(i32 %x) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.atomic.load16_u $push1=, 42($pop0){{$}}
 define i32 @load_i16_i32_z_from_numeric_address() {
-  %s = inttoptr i32 42 to i16*
-  %t = load atomic i16, i16* %s seq_cst, align 2
+  %s = inttoptr i32 42 to ptr
+  %t = load atomic i16, ptr %s seq_cst, align 2
   %u = zext i16 %t to i32
   ret i32 %u
 }
@@ -556,7 +556,7 @@ define i32 @load_i16_i32_z_from_numeric_address() {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.atomic.load8_u $push1=, gv8($pop0){{$}}
 define i32 @load_i8_i32_z_from_global_address() {
-  %t = load atomic i8, i8* @gv8 seq_cst, align 1
+  %t = load atomic i8, ptr @gv8 seq_cst, align 1
   %u = zext i8 %t to i32
   ret i32 %u
 }
@@ -566,8 +566,8 @@ define i32 @load_i8_i32_z_from_global_address() {
 ; CHECK-LABEL: load_i8_i32_retvalue:
 ; CHECK: i32.atomic.load8_u $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i8 @load_i8_i32_retvalue(i8 *%p) {
-  %v = load atomic i8, i8* %p seq_cst, align 1
+define i8 @load_i8_i32_retvalue(ptr %p) {
+  %v = load atomic i8, ptr %p seq_cst, align 1
   ret i8 %v
 }
 
@@ -579,23 +579,23 @@ define i8 @load_i8_i32_retvalue(i8 *%p) {
 
 ; CHECK-LABEL: store_i8_i32_with_folded_offset:
 ; CHECK: i32.atomic.store8 24($0), $1{{$}}
-define void @store_i8_i32_with_folded_offset(i8* %p, i32 %v) {
-  %q = ptrtoint i8* %p to i32
+define void @store_i8_i32_with_folded_offset(ptr %p, i32 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
+  %s = inttoptr i32 %r to ptr
   %t = trunc i32 %v to i8
-  store atomic i8 %t, i8* %s seq_cst, align 1
+  store atomic i8 %t, ptr %s seq_cst, align 1
   ret void
 }
 
 ; CHECK-LABEL: store_i32_i64_with_folded_offset:
 ; CHECK: i64.atomic.store32 24($0), $1{{$}}
-define void @store_i32_i64_with_folded_offset(i32* %p, i64 %v) {
-  %q = ptrtoint i32* %p to i32
+define void @store_i32_i64_with_folded_offset(ptr %p, i64 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
+  %s = inttoptr i32 %r to ptr
   %t = trunc i64 %v to i32
-  store atomic i32 %t, i32* %s seq_cst, align 4
+  store atomic i32 %t, ptr %s seq_cst, align 4
   ret void
 }
 
@@ -603,28 +603,28 @@ define void @store_i32_i64_with_folded_offset(i32* %p, i64 %v) {
 
 ; CHECK-LABEL: store_i8_i32_with_folded_gep_offset:
 ; CHECK: i32.atomic.store8 24($0), $1{{$}}
-define void @store_i8_i32_with_folded_gep_offset(i8* %p, i32 %v) {
-  %s = getelementptr inbounds i8, i8* %p, i32 24
+define void @store_i8_i32_with_folded_gep_offset(ptr %p, i32 %v) {
+  %s = getelementptr inbounds i8, ptr %p, i32 24
   %t = trunc i32 %v to i8
-  store atomic i8 %t, i8* %s seq_cst, align 1
+  store atomic i8 %t, ptr %s seq_cst, align 1
   ret void
 }
 
 ; CHECK-LABEL: store_i16_i32_with_folded_gep_offset:
 ; CHECK: i32.atomic.store16 48($0), $1{{$}}
-define void @store_i16_i32_with_folded_gep_offset(i16* %p, i32 %v) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
+define void @store_i16_i32_with_folded_gep_offset(ptr %p, i32 %v) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
   %t = trunc i32 %v to i16
-  store atomic i16 %t, i16* %s seq_cst, align 2
+  store atomic i16 %t, ptr %s seq_cst, align 2
   ret void
 }
 
 ; CHECK-LABEL: store_i16_i64_with_folded_gep_offset:
 ; CHECK: i64.atomic.store16 48($0), $1{{$}}
-define void @store_i16_i64_with_folded_gep_offset(i16* %p, i64 %v) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
+define void @store_i16_i64_with_folded_gep_offset(ptr %p, i64 %v) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
   %t = trunc i64 %v to i16
-  store atomic i16 %t, i16* %s seq_cst, align 2
+  store atomic i16 %t, ptr %s seq_cst, align 2
   ret void
 }
 
@@ -635,10 +635,10 @@ define void @store_i16_i64_with_folded_gep_offset(i16* %p, i64 %v) {
 ; CHECK: i32.atomic.store8 2($pop{{[0-9]+}}), $1{{$}}
 define void @store_i8_i32_with_folded_or_offset(i32 %x, i32 %v) {
   %and = and i32 %x, -4
-  %p = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
+  %p = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 2
   %t = trunc i32 %v to i8
-  store atomic i8 %t, i8* %arrayidx seq_cst, align 1
+  store atomic i8 %t, ptr %arrayidx seq_cst, align 1
   ret void
 }
 
@@ -646,10 +646,10 @@ define void @store_i8_i32_with_folded_or_offset(i32 %x, i32 %v) {
 ; CHECK: i64.atomic.store8 2($pop{{[0-9]+}}), $1{{$}}
 define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
   %and = and i32 %x, -4
-  %p = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
+  %p = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 2
   %t = trunc i64 %v to i8
-  store atomic i8 %t, i8* %arrayidx seq_cst, align 1
+  store atomic i8 %t, ptr %arrayidx seq_cst, align 1
   ret void
 }
 
@@ -665,8 +665,8 @@ define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
 ; CHECK-NEXT: .functype rmw_add_i32_no_offset (i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.add $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @rmw_add_i32_no_offset(i32* %p, i32 %v) {
-  %old = atomicrmw add i32* %p, i32 %v seq_cst
+define i32 @rmw_add_i32_no_offset(ptr %p, i32 %v) {
+  %old = atomicrmw add ptr %p, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -674,11 +674,11 @@ define i32 @rmw_add_i32_no_offset(i32* %p, i32 %v) {
 
 ; CHECK-LABEL: rmw_add_i32_with_folded_offset:
 ; CHECK: i32.atomic.rmw.add $push0=, 24($0), $1{{$}}
-define i32 @rmw_add_i32_with_folded_offset(i32* %p, i32 %v) {
-  %q = ptrtoint i32* %p to i32
+define i32 @rmw_add_i32_with_folded_offset(ptr %p, i32 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %old = atomicrmw add i32* %s, i32 %v seq_cst
+  %s = inttoptr i32 %r to ptr
+  %old = atomicrmw add ptr %s, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -686,9 +686,9 @@ define i32 @rmw_add_i32_with_folded_offset(i32* %p, i32 %v) {
 
 ; CHECK-LABEL: rmw_add_i32_with_folded_gep_offset:
 ; CHECK: i32.atomic.rmw.add $push0=, 24($0), $1{{$}}
-define i32 @rmw_add_i32_with_folded_gep_offset(i32* %p, i32 %v) {
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  %old = atomicrmw add i32* %s, i32 %v seq_cst
+define i32 @rmw_add_i32_with_folded_gep_offset(ptr %p, i32 %v) {
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  %old = atomicrmw add ptr %s, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -698,9 +698,9 @@ define i32 @rmw_add_i32_with_folded_gep_offset(i32* %p, i32 %v) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
-define i32 @rmw_add_i32_with_unfolded_gep_negative_offset(i32* %p, i32 %v) {
-  %s = getelementptr inbounds i32, i32* %p, i32 -6
-  %old = atomicrmw add i32* %s, i32 %v seq_cst
+define i32 @rmw_add_i32_with_unfolded_gep_negative_offset(ptr %p, i32 %v) {
+  %s = getelementptr inbounds i32, ptr %p, i32 -6
+  %old = atomicrmw add ptr %s, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -710,11 +710,11 @@ define i32 @rmw_add_i32_with_unfolded_gep_negative_offset(i32* %p, i32 %v) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
-define i32 @rmw_add_i32_with_unfolded_offset(i32* %p, i32 %v) {
-  %q = ptrtoint i32* %p to i32
+define i32 @rmw_add_i32_with_unfolded_offset(ptr %p, i32 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %old = atomicrmw add i32* %s, i32 %v seq_cst
+  %s = inttoptr i32 %r to ptr
+  %old = atomicrmw add ptr %s, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -724,9 +724,9 @@ define i32 @rmw_add_i32_with_unfolded_offset(i32* %p, i32 %v) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
-define i32 @rmw_add_i32_with_unfolded_gep_offset(i32* %p, i32 %v) {
-  %s = getelementptr i32, i32* %p, i32 6
-  %old = atomicrmw add i32* %s, i32 %v seq_cst
+define i32 @rmw_add_i32_with_unfolded_gep_offset(ptr %p, i32 %v) {
+  %s = getelementptr i32, ptr %p, i32 6
+  %old = atomicrmw add ptr %s, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -736,8 +736,8 @@ define i32 @rmw_add_i32_with_unfolded_gep_offset(i32* %p, i32 %v) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.atomic.rmw.add $push1=, 42($pop0), $0{{$}}
 define i32 @rmw_add_i32_from_numeric_address(i32 %v) {
-  %s = inttoptr i32 42 to i32*
-  %old = atomicrmw add i32* %s, i32 %v seq_cst
+  %s = inttoptr i32 42 to ptr
+  %old = atomicrmw add ptr %s, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -745,7 +745,7 @@ define i32 @rmw_add_i32_from_numeric_address(i32 %v) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.atomic.rmw.add $push1=, gv($pop0), $0{{$}}
 define i32 @rmw_add_i32_from_global_address(i32 %v) {
-  %old = atomicrmw add i32* @gv, i32 %v seq_cst
+  %old = atomicrmw add ptr @gv, i32 %v seq_cst
   ret i32 %old
 }
 
@@ -759,8 +759,8 @@ define i32 @rmw_add_i32_from_global_address(i32 %v) {
 ; CHECK-NEXT: .functype rmw_add_i64_no_offset (i32, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw.add $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @rmw_add_i64_no_offset(i64* %p, i64 %v) {
-  %old = atomicrmw add i64* %p, i64 %v seq_cst
+define i64 @rmw_add_i64_no_offset(ptr %p, i64 %v) {
+  %old = atomicrmw add ptr %p, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -768,11 +768,11 @@ define i64 @rmw_add_i64_no_offset(i64* %p, i64 %v) {
 
 ; CHECK-LABEL: rmw_add_i64_with_folded_offset:
 ; CHECK: i64.atomic.rmw.add $push0=, 24($0), $1{{$}}
-define i64 @rmw_add_i64_with_folded_offset(i64* %p, i64 %v) {
-  %q = ptrtoint i64* %p to i32
+define i64 @rmw_add_i64_with_folded_offset(ptr %p, i64 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %old = atomicrmw add i64* %s, i64 %v seq_cst
+  %s = inttoptr i32 %r to ptr
+  %old = atomicrmw add ptr %s, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -780,9 +780,9 @@ define i64 @rmw_add_i64_with_folded_offset(i64* %p, i64 %v) {
 
 ; CHECK-LABEL: rmw_add_i64_with_folded_gep_offset:
 ; CHECK: i64.atomic.rmw.add $push0=, 24($0), $1{{$}}
-define i64 @rmw_add_i64_with_folded_gep_offset(i64* %p, i64 %v) {
-  %s = getelementptr inbounds i64, i64* %p, i32 3
-  %old = atomicrmw add i64* %s, i64 %v seq_cst
+define i64 @rmw_add_i64_with_folded_gep_offset(ptr %p, i64 %v) {
+  %s = getelementptr inbounds i64, ptr %p, i32 3
+  %old = atomicrmw add ptr %s, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -792,9 +792,9 @@ define i64 @rmw_add_i64_with_folded_gep_offset(i64* %p, i64 %v) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
-define i64 @rmw_add_i64_with_unfolded_gep_negative_offset(i64* %p, i64 %v) {
-  %s = getelementptr inbounds i64, i64* %p, i32 -3
-  %old = atomicrmw add i64* %s, i64 %v seq_cst
+define i64 @rmw_add_i64_with_unfolded_gep_negative_offset(ptr %p, i64 %v) {
+  %s = getelementptr inbounds i64, ptr %p, i32 -3
+  %old = atomicrmw add ptr %s, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -804,11 +804,11 @@ define i64 @rmw_add_i64_with_unfolded_gep_negative_offset(i64* %p, i64 %v) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
-define i64 @rmw_add_i64_with_unfolded_offset(i64* %p, i64 %v) {
-  %q = ptrtoint i64* %p to i32
+define i64 @rmw_add_i64_with_unfolded_offset(ptr %p, i64 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %old = atomicrmw add i64* %s, i64 %v seq_cst
+  %s = inttoptr i32 %r to ptr
+  %old = atomicrmw add ptr %s, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -818,9 +818,9 @@ define i64 @rmw_add_i64_with_unfolded_offset(i64* %p, i64 %v) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.rmw.add $push2=, 0($pop1), $1{{$}}
-define i64 @rmw_add_i64_with_unfolded_gep_offset(i64* %p, i64 %v) {
-  %s = getelementptr i64, i64* %p, i32 3
-  %old = atomicrmw add i64* %s, i64 %v seq_cst
+define i64 @rmw_add_i64_with_unfolded_gep_offset(ptr %p, i64 %v) {
+  %s = getelementptr i64, ptr %p, i32 3
+  %old = atomicrmw add ptr %s, i64 %v seq_cst
   ret i64 %old
 }
 
@@ -833,12 +833,12 @@ define i64 @rmw_add_i64_with_unfolded_gep_offset(i64* %p, i64 %v) {
 ; CHECK-LABEL: rmw_add_i8_i32_s_with_folded_offset:
 ; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0
-define i32 @rmw_add_i8_i32_s_with_folded_offset(i8* %p, i32 %v) {
-  %q = ptrtoint i8* %p to i32
+define i32 @rmw_add_i8_i32_s_with_folded_offset(ptr %p, i32 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
+  %s = inttoptr i32 %r to ptr
   %t = trunc i32 %v to i8
-  %old = atomicrmw add i8* %s, i8 %t seq_cst
+  %old = atomicrmw add ptr %s, i8 %t seq_cst
   %u = sext i8 %old to i32
   ret i32 %u
 }
@@ -848,12 +848,12 @@ define i32 @rmw_add_i8_i32_s_with_folded_offset(i8* %p, i32 %v) {
 ; CHECK: i32.wrap_i64 $push0=, $1
 ; CHECK-NEXT: i32.atomic.rmw.add $push1=, 24($0), $pop0{{$}}
 ; CHECK-NEXT: i64.extend_i32_s $push2=, $pop1{{$}}
-define i64 @rmw_add_i32_i64_s_with_folded_offset(i32* %p, i64 %v) {
-  %q = ptrtoint i32* %p to i32
+define i64 @rmw_add_i32_i64_s_with_folded_offset(ptr %p, i64 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
+  %s = inttoptr i32 %r to ptr
   %t = trunc i64 %v to i32
-  %old = atomicrmw add i32* %s, i32 %t seq_cst
+  %old = atomicrmw add ptr %s, i32 %t seq_cst
   %u = sext i32 %old to i64
   ret i64 %u
 }
@@ -863,10 +863,10 @@ define i64 @rmw_add_i32_i64_s_with_folded_offset(i32* %p, i64 %v) {
 ; CHECK-LABEL: rmw_add_i8_i32_s_with_folded_gep_offset:
 ; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0
-define i32 @rmw_add_i8_i32_s_with_folded_gep_offset(i8* %p, i32 %v) {
-  %s = getelementptr inbounds i8, i8* %p, i32 24
+define i32 @rmw_add_i8_i32_s_with_folded_gep_offset(ptr %p, i32 %v) {
+  %s = getelementptr inbounds i8, ptr %p, i32 24
   %t = trunc i32 %v to i8
-  %old = atomicrmw add i8* %s, i8 %t seq_cst
+  %old = atomicrmw add ptr %s, i8 %t seq_cst
   %u = sext i8 %old to i32
   ret i32 %u
 }
@@ -874,10 +874,10 @@ define i32 @rmw_add_i8_i32_s_with_folded_gep_offset(i8* %p, i32 %v) {
 ; CHECK-LABEL: rmw_add_i16_i32_s_with_folded_gep_offset:
 ; CHECK: i32.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0
-define i32 @rmw_add_i16_i32_s_with_folded_gep_offset(i16* %p, i32 %v) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
+define i32 @rmw_add_i16_i32_s_with_folded_gep_offset(ptr %p, i32 %v) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
   %t = trunc i32 %v to i16
-  %old = atomicrmw add i16* %s, i16 %t seq_cst
+  %old = atomicrmw add ptr %s, i16 %t seq_cst
   %u = sext i16 %old to i32
   ret i32 %u
 }
@@ -885,10 +885,10 @@ define i32 @rmw_add_i16_i32_s_with_folded_gep_offset(i16* %p, i32 %v) {
 ; CHECK-LABEL: rmw_add_i16_i64_s_with_folded_gep_offset:
 ; CHECK: i64.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0
-define i64 @rmw_add_i16_i64_s_with_folded_gep_offset(i16* %p, i64 %v) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
+define i64 @rmw_add_i16_i64_s_with_folded_gep_offset(ptr %p, i64 %v) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
   %t = trunc i64 %v to i16
-  %old = atomicrmw add i16* %s, i16 %t seq_cst
+  %old = atomicrmw add ptr %s, i16 %t seq_cst
   %u = sext i16 %old to i64
   ret i64 %u
 }
@@ -901,10 +901,10 @@ define i64 @rmw_add_i16_i64_s_with_folded_gep_offset(i16* %p, i64 %v) {
 ; CHECK-NEXT: i32.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
 define i32 @rmw_add_i8_i32_s_with_folded_or_offset(i32 %x, i32 %v) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
   %t = trunc i32 %v to i8
-  %old = atomicrmw add i8* %arrayidx, i8 %t seq_cst
+  %old = atomicrmw add ptr %arrayidx, i8 %t seq_cst
   %conv = sext i8 %old to i32
   ret i32 %conv
 }
@@ -914,10 +914,10 @@ define i32 @rmw_add_i8_i32_s_with_folded_or_offset(i32 %x, i32 %v) {
 ; CHECK-NEXT: i64.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
 define i64 @rmw_add_i8_i64_s_with_folded_or_offset(i32 %x, i64 %v) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
   %t = trunc i64 %v to i8
-  %old = atomicrmw add i8* %arrayidx, i8 %t seq_cst
+  %old = atomicrmw add ptr %arrayidx, i8 %t seq_cst
   %conv = sext i8 %old to i64
   ret i64 %conv
 }
@@ -929,9 +929,9 @@ define i64 @rmw_add_i8_i64_s_with_folded_or_offset(i32 %x, i64 %v) {
 ; CHECK: i32.atomic.rmw16.add_u $push1=, 42($pop0), $0{{$}}
 ; CHECK-NEXT: i32.extend16_s $push2=, $pop1
 define i32 @rmw_add_i16_i32_s_from_numeric_address(i32 %v) {
-  %s = inttoptr i32 42 to i16*
+  %s = inttoptr i32 42 to ptr
   %t = trunc i32 %v to i16
-  %old = atomicrmw add i16* %s, i16 %t seq_cst
+  %old = atomicrmw add ptr %s, i16 %t seq_cst
   %u = sext i16 %old to i32
   ret i32 %u
 }
@@ -942,7 +942,7 @@ define i32 @rmw_add_i16_i32_s_from_numeric_address(i32 %v) {
 ; CHECK-NEXT: i32.extend8_s $push2=, $pop1{{$}}
 define i32 @rmw_add_i8_i32_s_from_global_address(i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw add i8* @gv8, i8 %t seq_cst
+  %old = atomicrmw add ptr @gv8, i8 %t seq_cst
   %u = sext i8 %old to i32
   ret i32 %u
 }
@@ -955,24 +955,24 @@ define i32 @rmw_add_i8_i32_s_from_global_address(i32 %v) {
 
 ; CHECK-LABEL: rmw_add_i8_i32_z_with_folded_offset:
 ; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
-define i32 @rmw_add_i8_i32_z_with_folded_offset(i8* %p, i32 %v) {
-  %q = ptrtoint i8* %p to i32
+define i32 @rmw_add_i8_i32_z_with_folded_offset(ptr %p, i32 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
+  %s = inttoptr i32 %r to ptr
   %t = trunc i32 %v to i8
-  %old = atomicrmw add i8* %s, i8 %t seq_cst
+  %old = atomicrmw add ptr %s, i8 %t seq_cst
   %u = zext i8 %old to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: rmw_add_i32_i64_z_with_folded_offset:
 ; CHECK: i64.atomic.rmw32.add_u $push0=, 24($0), $1{{$}}
-define i64 @rmw_add_i32_i64_z_with_folded_offset(i32* %p, i64 %v) {
-  %q = ptrtoint i32* %p to i32
+define i64 @rmw_add_i32_i64_z_with_folded_offset(ptr %p, i64 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
+  %s = inttoptr i32 %r to ptr
   %t = trunc i64 %v to i32
-  %old = atomicrmw add i32* %s, i32 %t seq_cst
+  %old = atomicrmw add ptr %s, i32 %t seq_cst
   %u = zext i32 %old to i64
   ret i64 %u
 }
@@ -981,30 +981,30 @@ define i64 @rmw_add_i32_i64_z_with_folded_offset(i32* %p, i64 %v) {
 
 ; CHECK-LABEL: rmw_add_i8_i32_z_with_folded_gep_offset:
 ; CHECK: i32.atomic.rmw8.add_u $push0=, 24($0), $1{{$}}
-define i32 @rmw_add_i8_i32_z_with_folded_gep_offset(i8* %p, i32 %v) {
-  %s = getelementptr inbounds i8, i8* %p, i32 24
+define i32 @rmw_add_i8_i32_z_with_folded_gep_offset(ptr %p, i32 %v) {
+  %s = getelementptr inbounds i8, ptr %p, i32 24
   %t = trunc i32 %v to i8
-  %old = atomicrmw add i8* %s, i8 %t seq_cst
+  %old = atomicrmw add ptr %s, i8 %t seq_cst
   %u = zext i8 %old to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: rmw_add_i16_i32_z_with_folded_gep_offset:
 ; CHECK: i32.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
-define i32 @rmw_add_i16_i32_z_with_folded_gep_offset(i16* %p, i32 %v) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
+define i32 @rmw_add_i16_i32_z_with_folded_gep_offset(ptr %p, i32 %v) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
   %t = trunc i32 %v to i16
-  %old = atomicrmw add i16* %s, i16 %t seq_cst
+  %old = atomicrmw add ptr %s, i16 %t seq_cst
   %u = zext i16 %old to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: rmw_add_i16_i64_z_with_folded_gep_offset:
 ; CHECK: i64.atomic.rmw16.add_u $push0=, 48($0), $1{{$}}
-define i64 @rmw_add_i16_i64_z_with_folded_gep_offset(i16* %p, i64 %v) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
+define i64 @rmw_add_i16_i64_z_with_folded_gep_offset(ptr %p, i64 %v) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
   %t = trunc i64 %v to i16
-  %old = atomicrmw add i16* %s, i16 %t seq_cst
+  %old = atomicrmw add ptr %s, i16 %t seq_cst
   %u = zext i16 %old to i64
   ret i64 %u
 }
@@ -1016,10 +1016,10 @@ define i64 @rmw_add_i16_i64_z_with_folded_gep_offset(i16* %p, i64 %v) {
 ; CHECK: i32.atomic.rmw8.add_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
 define i32 @rmw_add_i8_i32_z_with_folded_or_offset(i32 %x, i32 %v) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
   %t = trunc i32 %v to i8
-  %old = atomicrmw add i8* %arrayidx, i8 %t seq_cst
+  %old = atomicrmw add ptr %arrayidx, i8 %t seq_cst
   %conv = zext i8 %old to i32
   ret i32 %conv
 }
@@ -1028,10 +1028,10 @@ define i32 @rmw_add_i8_i32_z_with_folded_or_offset(i32 %x, i32 %v) {
 ; CHECK: i64.atomic.rmw8.add_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1{{$}}
 define i64 @rmw_add_i8_i64_z_with_folded_or_offset(i32 %x, i64 %v) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
   %t = trunc i64 %v to i8
-  %old = atomicrmw add i8* %arrayidx, i8 %t seq_cst
+  %old = atomicrmw add ptr %arrayidx, i8 %t seq_cst
   %conv = zext i8 %old to i64
   ret i64 %conv
 }
@@ -1042,9 +1042,9 @@ define i64 @rmw_add_i8_i64_z_with_folded_or_offset(i32 %x, i64 %v) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.atomic.rmw16.add_u $push1=, 42($pop0), $0{{$}}
 define i32 @rmw_add_i16_i32_z_from_numeric_address(i32 %v) {
-  %s = inttoptr i32 42 to i16*
+  %s = inttoptr i32 42 to ptr
   %t = trunc i32 %v to i16
-  %old = atomicrmw add i16* %s, i16 %t seq_cst
+  %old = atomicrmw add ptr %s, i16 %t seq_cst
   %u = zext i16 %old to i32
   ret i32 %u
 }
@@ -1054,7 +1054,7 @@ define i32 @rmw_add_i16_i32_z_from_numeric_address(i32 %v) {
 ; CHECK: i32.atomic.rmw8.add_u $push1=, gv8($pop0), $0{{$}}
 define i32 @rmw_add_i8_i32_z_from_global_address(i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw add i8* @gv8, i8 %t seq_cst
+  %old = atomicrmw add ptr @gv8, i8 %t seq_cst
   %u = zext i8 %old to i32
   ret i32 %u
 }
@@ -1064,9 +1064,9 @@ define i32 @rmw_add_i8_i32_z_from_global_address(i32 %v) {
 ; CHECK-LABEL: rmw_add_i8_i32_retvalue:
 ; CHECK: i32.atomic.rmw8.add_u $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i8 @rmw_add_i8_i32_retvalue(i8 *%p, i32 %v) {
+define i8 @rmw_add_i8_i32_retvalue(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  %old = atomicrmw add i8* %p, i8 %t seq_cst
+  %old = atomicrmw add ptr %p, i8 %t seq_cst
   ret i8 %old
 }
 
@@ -1080,8 +1080,8 @@ define i8 @rmw_add_i8_i32_retvalue(i8 *%p, i32 %v) {
 ; CHECK-NEXT: .functype cmpxchg_i32_no_offset (i32, i32, i32) -> (i32){{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @cmpxchg_i32_no_offset(i32* %p, i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* %p, i32 %exp, i32 %new seq_cst seq_cst
+define i32 @cmpxchg_i32_no_offset(ptr %p, i32 %exp, i32 %new) {
+  %pair = cmpxchg ptr %p, i32 %exp, i32 %new seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -1090,11 +1090,11 @@ define i32 @cmpxchg_i32_no_offset(i32* %p, i32 %exp, i32 %new) {
 
 ; CHECK-LABEL: cmpxchg_i32_with_folded_offset:
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 24($0), $1, $2{{$}}
-define i32 @cmpxchg_i32_with_folded_offset(i32* %p, i32 %exp, i32 %new) {
-  %q = ptrtoint i32* %p to i32
+define i32 @cmpxchg_i32_with_folded_offset(ptr %p, i32 %exp, i32 %new) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %pair = cmpxchg i32* %s, i32 %exp, i32 %new seq_cst seq_cst
+  %s = inttoptr i32 %r to ptr
+  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -1103,9 +1103,9 @@ define i32 @cmpxchg_i32_with_folded_offset(i32* %p, i32 %exp, i32 %new) {
 
 ; CHECK-LABEL: cmpxchg_i32_with_folded_gep_offset:
 ; CHECK: i32.atomic.rmw.cmpxchg $push0=, 24($0), $1, $2{{$}}
-define i32 @cmpxchg_i32_with_folded_gep_offset(i32* %p, i32 %exp, i32 %new) {
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  %pair = cmpxchg i32* %s, i32 %exp, i32 %new seq_cst seq_cst
+define i32 @cmpxchg_i32_with_folded_gep_offset(ptr %p, i32 %exp, i32 %new) {
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -1116,9 +1116,9 @@ define i32 @cmpxchg_i32_with_folded_gep_offset(i32* %p, i32 %exp, i32 %new) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
-define i32 @cmpxchg_i32_with_unfolded_gep_negative_offset(i32* %p, i32 %exp, i32 %new) {
-  %s = getelementptr inbounds i32, i32* %p, i32 -6
-  %pair = cmpxchg i32* %s, i32 %exp, i32 %new seq_cst seq_cst
+define i32 @cmpxchg_i32_with_unfolded_gep_negative_offset(ptr %p, i32 %exp, i32 %new) {
+  %s = getelementptr inbounds i32, ptr %p, i32 -6
+  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -1129,11 +1129,11 @@ define i32 @cmpxchg_i32_with_unfolded_gep_negative_offset(i32* %p, i32 %exp, i32
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
-define i32 @cmpxchg_i32_with_unfolded_offset(i32* %p, i32 %exp, i32 %new) {
-  %q = ptrtoint i32* %p to i32
+define i32 @cmpxchg_i32_with_unfolded_offset(ptr %p, i32 %exp, i32 %new) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %pair = cmpxchg i32* %s, i32 %exp, i32 %new seq_cst seq_cst
+  %s = inttoptr i32 %r to ptr
+  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -1144,9 +1144,9 @@ define i32 @cmpxchg_i32_with_unfolded_offset(i32* %p, i32 %exp, i32 %new) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
-define i32 @cmpxchg_i32_with_unfolded_gep_offset(i32* %p, i32 %exp, i32 %new) {
-  %s = getelementptr i32, i32* %p, i32 6
-  %pair = cmpxchg i32* %s, i32 %exp, i32 %new seq_cst seq_cst
+define i32 @cmpxchg_i32_with_unfolded_gep_offset(ptr %p, i32 %exp, i32 %new) {
+  %s = getelementptr i32, ptr %p, i32 6
+  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -1157,8 +1157,8 @@ define i32 @cmpxchg_i32_with_unfolded_gep_offset(i32* %p, i32 %exp, i32 %new) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push1=, 42($pop0), $0, $1{{$}}
 define i32 @cmpxchg_i32_from_numeric_address(i32 %exp, i32 %new) {
-  %s = inttoptr i32 42 to i32*
-  %pair = cmpxchg i32* %s, i32 %exp, i32 %new seq_cst seq_cst
+  %s = inttoptr i32 42 to ptr
+  %pair = cmpxchg ptr %s, i32 %exp, i32 %new seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -1167,7 +1167,7 @@ define i32 @cmpxchg_i32_from_numeric_address(i32 %exp, i32 %new) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.atomic.rmw.cmpxchg $push1=, gv($pop0), $0, $1{{$}}
 define i32 @cmpxchg_i32_from_global_address(i32 %exp, i32 %new) {
-  %pair = cmpxchg i32* @gv, i32 %exp, i32 %new seq_cst seq_cst
+  %pair = cmpxchg ptr @gv, i32 %exp, i32 %new seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   ret i32 %old
 }
@@ -1182,8 +1182,8 @@ define i32 @cmpxchg_i32_from_global_address(i32 %exp, i32 %new) {
 ; CHECK-NEXT: .functype cmpxchg_i64_no_offset (i32, i64, i64) -> (i64){{$}}
 ; CHECK: i64.atomic.rmw.cmpxchg $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @cmpxchg_i64_no_offset(i64* %p, i64 %exp, i64 %new) {
-  %pair = cmpxchg i64* %p, i64 %exp, i64 %new seq_cst seq_cst
+define i64 @cmpxchg_i64_no_offset(ptr %p, i64 %exp, i64 %new) {
+  %pair = cmpxchg ptr %p, i64 %exp, i64 %new seq_cst seq_cst
   %old = extractvalue { i64, i1 } %pair, 0
   ret i64 %old
 }
@@ -1192,11 +1192,11 @@ define i64 @cmpxchg_i64_no_offset(i64* %p, i64 %exp, i64 %new) {
 
 ; CHECK-LABEL: cmpxchg_i64_with_folded_offset:
 ; CHECK: i64.atomic.rmw.cmpxchg $push0=, 24($0), $1, $2{{$}}
-define i64 @cmpxchg_i64_with_folded_offset(i64* %p, i64 %exp, i64 %new) {
-  %q = ptrtoint i64* %p to i32
+define i64 @cmpxchg_i64_with_folded_offset(ptr %p, i64 %exp, i64 %new) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %pair = cmpxchg i64* %s, i64 %exp, i64 %new seq_cst seq_cst
+  %s = inttoptr i32 %r to ptr
+  %pair = cmpxchg ptr %s, i64 %exp, i64 %new seq_cst seq_cst
   %old = extractvalue { i64, i1 } %pair, 0
   ret i64 %old
 }
@@ -1205,9 +1205,9 @@ define i64 @cmpxchg_i64_with_folded_offset(i64* %p, i64 %exp, i64 %new) {
 
 ; CHECK-LABEL: cmpxchg_i64_with_folded_gep_offset:
 ; CHECK: i64.atomic.rmw.cmpxchg $push0=, 24($0), $1, $2{{$}}
-define i64 @cmpxchg_i64_with_folded_gep_offset(i64* %p, i64 %exp, i64 %new) {
-  %s = getelementptr inbounds i64, i64* %p, i32 3
-  %pair = cmpxchg i64* %s, i64 %exp, i64 %new seq_cst seq_cst
+define i64 @cmpxchg_i64_with_folded_gep_offset(ptr %p, i64 %exp, i64 %new) {
+  %s = getelementptr inbounds i64, ptr %p, i32 3
+  %pair = cmpxchg ptr %s, i64 %exp, i64 %new seq_cst seq_cst
   %old = extractvalue { i64, i1 } %pair, 0
   ret i64 %old
 }
@@ -1218,9 +1218,9 @@ define i64 @cmpxchg_i64_with_folded_gep_offset(i64* %p, i64 %exp, i64 %new) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
-define i64 @cmpxchg_i64_with_unfolded_gep_negative_offset(i64* %p, i64 %exp, i64 %new) {
-  %s = getelementptr inbounds i64, i64* %p, i32 -3
-  %pair = cmpxchg i64* %s, i64 %exp, i64 %new seq_cst seq_cst
+define i64 @cmpxchg_i64_with_unfolded_gep_negative_offset(ptr %p, i64 %exp, i64 %new) {
+  %s = getelementptr inbounds i64, ptr %p, i32 -3
+  %pair = cmpxchg ptr %s, i64 %exp, i64 %new seq_cst seq_cst
   %old = extractvalue { i64, i1 } %pair, 0
   ret i64 %old
 }
@@ -1231,11 +1231,11 @@ define i64 @cmpxchg_i64_with_unfolded_gep_negative_offset(i64* %p, i64 %exp, i64
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
-define i64 @cmpxchg_i64_with_unfolded_offset(i64* %p, i64 %exp, i64 %new) {
-  %q = ptrtoint i64* %p to i32
+define i64 @cmpxchg_i64_with_unfolded_offset(ptr %p, i64 %exp, i64 %new) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %pair = cmpxchg i64* %s, i64 %exp, i64 %new seq_cst seq_cst
+  %s = inttoptr i32 %r to ptr
+  %pair = cmpxchg ptr %s, i64 %exp, i64 %new seq_cst seq_cst
   %old = extractvalue { i64, i1 } %pair, 0
   ret i64 %old
 }
@@ -1246,9 +1246,9 @@ define i64 @cmpxchg_i64_with_unfolded_offset(i64* %p, i64 %exp, i64 %new) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: i64.atomic.rmw.cmpxchg $push2=, 0($pop1), $1, $2{{$}}
-define i64 @cmpxchg_i64_with_unfolded_gep_offset(i64* %p, i64 %exp, i64 %new) {
-  %s = getelementptr i64, i64* %p, i32 3
-  %pair = cmpxchg i64* %s, i64 %exp, i64 %new seq_cst seq_cst
+define i64 @cmpxchg_i64_with_unfolded_gep_offset(ptr %p, i64 %exp, i64 %new) {
+  %s = getelementptr i64, ptr %p, i32 3
+  %pair = cmpxchg ptr %s, i64 %exp, i64 %new seq_cst seq_cst
   %old = extractvalue { i64, i1 } %pair, 0
   ret i64 %old
 }
@@ -1262,13 +1262,13 @@ define i64 @cmpxchg_i64_with_unfolded_gep_offset(i64* %p, i64 %exp, i64 %new) {
 ; CHECK-LABEL: cmpxchg_i8_i32_s_with_folded_offset:
 ; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0
-define i32 @cmpxchg_i8_i32_s_with_folded_offset(i8* %p, i32 %exp, i32 %new) {
-  %q = ptrtoint i8* %p to i32
+define i32 @cmpxchg_i8_i32_s_with_folded_offset(ptr %p, i32 %exp, i32 %new) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
+  %s = inttoptr i32 %r to ptr
   %exp_t = trunc i32 %exp to i8
   %new_t = trunc i32 %new to i8
-  %pair = cmpxchg i8* %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %u = sext i8 %old to i32
   ret i32 %u
@@ -1280,13 +1280,13 @@ define i32 @cmpxchg_i8_i32_s_with_folded_offset(i8* %p, i32 %exp, i32 %new) {
 ; CHECK-NEXT: i32.wrap_i64 $push0=, $2
 ; CHECK-NEXT: i32.atomic.rmw.cmpxchg $push2=, 24($0), $pop1, $pop0{{$}}
 ; CHECK-NEXT: i64.extend_i32_s $push3=, $pop2{{$}}
-define i64 @cmpxchg_i32_i64_s_with_folded_offset(i32* %p, i64 %exp, i64 %new) {
-  %q = ptrtoint i32* %p to i32
+define i64 @cmpxchg_i32_i64_s_with_folded_offset(ptr %p, i64 %exp, i64 %new) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
+  %s = inttoptr i32 %r to ptr
   %exp_t = trunc i64 %exp to i32
   %new_t = trunc i64 %new to i32
-  %pair = cmpxchg i32* %s, i32 %exp_t, i32 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i32 %exp_t, i32 %new_t seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   %u = sext i32 %old to i64
   ret i64 %u
@@ -1297,11 +1297,11 @@ define i64 @cmpxchg_i32_i64_s_with_folded_offset(i32* %p, i64 %exp, i64 %new) {
 ; CHECK-LABEL: cmpxchg_i8_i32_s_with_folded_gep_offset:
 ; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
 ; CHECK-NEXT: i32.extend8_s $push1=, $pop0
-define i32 @cmpxchg_i8_i32_s_with_folded_gep_offset(i8* %p, i32 %exp, i32 %new) {
-  %s = getelementptr inbounds i8, i8* %p, i32 24
+define i32 @cmpxchg_i8_i32_s_with_folded_gep_offset(ptr %p, i32 %exp, i32 %new) {
+  %s = getelementptr inbounds i8, ptr %p, i32 24
   %exp_t = trunc i32 %exp to i8
   %new_t = trunc i32 %new to i8
-  %pair = cmpxchg i8* %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %u = sext i8 %old to i32
   ret i32 %u
@@ -1310,11 +1310,11 @@ define i32 @cmpxchg_i8_i32_s_with_folded_gep_offset(i8* %p, i32 %exp, i32 %new)
 ; CHECK-LABEL: cmpxchg_i16_i32_s_with_folded_gep_offset:
 ; CHECK: i32.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
 ; CHECK-NEXT: i32.extend16_s $push1=, $pop0
-define i32 @cmpxchg_i16_i32_s_with_folded_gep_offset(i16* %p, i32 %exp, i32 %new) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
+define i32 @cmpxchg_i16_i32_s_with_folded_gep_offset(ptr %p, i32 %exp, i32 %new) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
   %exp_t = trunc i32 %exp to i16
   %new_t = trunc i32 %new to i16
-  %pair = cmpxchg i16* %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
   %old = extractvalue { i16, i1 } %pair, 0
   %u = sext i16 %old to i32
   ret i32 %u
@@ -1323,11 +1323,11 @@ define i32 @cmpxchg_i16_i32_s_with_folded_gep_offset(i16* %p, i32 %exp, i32 %new
 ; CHECK-LABEL: cmpxchg_i16_i64_s_with_folded_gep_offset:
 ; CHECK: i64.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
 ; CHECK-NEXT: i64.extend16_s $push1=, $pop0
-define i64 @cmpxchg_i16_i64_s_with_folded_gep_offset(i16* %p, i64 %exp, i64 %new) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
+define i64 @cmpxchg_i16_i64_s_with_folded_gep_offset(ptr %p, i64 %exp, i64 %new) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
   %exp_t = trunc i64 %exp to i16
   %new_t = trunc i64 %new to i16
-  %pair = cmpxchg i16* %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
   %old = extractvalue { i16, i1 } %pair, 0
   %u = sext i16 %old to i64
   ret i64 %u
@@ -1341,11 +1341,11 @@ define i64 @cmpxchg_i16_i64_s_with_folded_gep_offset(i16* %p, i64 %exp, i64 %new
 ; CHECK-NEXT: i32.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
 define i32 @cmpxchg_i8_i32_s_with_folded_or_offset(i32 %x, i32 %exp, i32 %new) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
   %exp_t = trunc i32 %exp to i8
   %new_t = trunc i32 %new to i8
-  %pair = cmpxchg i8* %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %conv = sext i8 %old to i32
   ret i32 %conv
@@ -1356,11 +1356,11 @@ define i32 @cmpxchg_i8_i32_s_with_folded_or_offset(i32 %x, i32 %exp, i32 %new) {
 ; CHECK-NEXT: i64.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
 define i64 @cmpxchg_i8_i64_s_with_folded_or_offset(i32 %x, i64 %exp, i64 %new) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
   %exp_t = trunc i64 %exp to i8
   %new_t = trunc i64 %new to i8
-  %pair = cmpxchg i8* %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %conv = sext i8 %old to i64
   ret i64 %conv
@@ -1373,10 +1373,10 @@ define i64 @cmpxchg_i8_i64_s_with_folded_or_offset(i32 %x, i64 %exp, i64 %new) {
 ; CHECK: i32.atomic.rmw16.cmpxchg_u $push1=, 42($pop0), $0, $1{{$}}
 ; CHECK-NEXT: i32.extend16_s $push2=, $pop1
 define i32 @cmpxchg_i16_i32_s_from_numeric_address(i32 %exp, i32 %new) {
-  %s = inttoptr i32 42 to i16*
+  %s = inttoptr i32 42 to ptr
   %exp_t = trunc i32 %exp to i16
   %new_t = trunc i32 %new to i16
-  %pair = cmpxchg i16* %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
   %old = extractvalue { i16, i1 } %pair, 0
   %u = sext i16 %old to i32
   ret i32 %u
@@ -1389,7 +1389,7 @@ define i32 @cmpxchg_i16_i32_s_from_numeric_address(i32 %exp, i32 %new) {
 define i32 @cmpxchg_i8_i32_s_from_global_address(i32 %exp, i32 %new) {
   %exp_t = trunc i32 %exp to i8
   %new_t = trunc i32 %new to i8
-  %pair = cmpxchg i8* @gv8, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr @gv8, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %u = sext i8 %old to i32
   ret i32 %u
@@ -1403,13 +1403,13 @@ define i32 @cmpxchg_i8_i32_s_from_global_address(i32 %exp, i32 %new) {
 
 ; CHECK-LABEL: cmpxchg_i8_i32_z_with_folded_offset:
 ; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
-define i32 @cmpxchg_i8_i32_z_with_folded_offset(i8* %p, i32 %exp, i32 %new) {
-  %q = ptrtoint i8* %p to i32
+define i32 @cmpxchg_i8_i32_z_with_folded_offset(ptr %p, i32 %exp, i32 %new) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
+  %s = inttoptr i32 %r to ptr
   %exp_t = trunc i32 %exp to i8
   %new_t = trunc i32 %new to i8
-  %pair = cmpxchg i8* %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %u = zext i8 %old to i32
   ret i32 %u
@@ -1417,13 +1417,13 @@ define i32 @cmpxchg_i8_i32_z_with_folded_offset(i8* %p, i32 %exp, i32 %new) {
 
 ; CHECK-LABEL: cmpxchg_i32_i64_z_with_folded_offset:
 ; CHECK: i64.atomic.rmw32.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
-define i64 @cmpxchg_i32_i64_z_with_folded_offset(i32* %p, i64 %exp, i64 %new) {
-  %q = ptrtoint i32* %p to i32
+define i64 @cmpxchg_i32_i64_z_with_folded_offset(ptr %p, i64 %exp, i64 %new) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
+  %s = inttoptr i32 %r to ptr
   %exp_t = trunc i64 %exp to i32
   %new_t = trunc i64 %new to i32
-  %pair = cmpxchg i32* %s, i32 %exp_t, i32 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i32 %exp_t, i32 %new_t seq_cst seq_cst
   %old = extractvalue { i32, i1 } %pair, 0
   %u = zext i32 %old to i64
   ret i64 %u
@@ -1433,11 +1433,11 @@ define i64 @cmpxchg_i32_i64_z_with_folded_offset(i32* %p, i64 %exp, i64 %new) {
 
 ; CHECK-LABEL: cmpxchg_i8_i32_z_with_folded_gep_offset:
 ; CHECK: i32.atomic.rmw8.cmpxchg_u $push0=, 24($0), $1, $2{{$}}
-define i32 @cmpxchg_i8_i32_z_with_folded_gep_offset(i8* %p, i32 %exp, i32 %new) {
-  %s = getelementptr inbounds i8, i8* %p, i32 24
+define i32 @cmpxchg_i8_i32_z_with_folded_gep_offset(ptr %p, i32 %exp, i32 %new) {
+  %s = getelementptr inbounds i8, ptr %p, i32 24
   %exp_t = trunc i32 %exp to i8
   %new_t = trunc i32 %new to i8
-  %pair = cmpxchg i8* %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %u = zext i8 %old to i32
   ret i32 %u
@@ -1445,11 +1445,11 @@ define i32 @cmpxchg_i8_i32_z_with_folded_gep_offset(i8* %p, i32 %exp, i32 %new)
 
 ; CHECK-LABEL: cmpxchg_i16_i32_z_with_folded_gep_offset:
 ; CHECK: i32.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
-define i32 @cmpxchg_i16_i32_z_with_folded_gep_offset(i16* %p, i32 %exp, i32 %new) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
+define i32 @cmpxchg_i16_i32_z_with_folded_gep_offset(ptr %p, i32 %exp, i32 %new) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
   %exp_t = trunc i32 %exp to i16
   %new_t = trunc i32 %new to i16
-  %pair = cmpxchg i16* %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
   %old = extractvalue { i16, i1 } %pair, 0
   %u = zext i16 %old to i32
   ret i32 %u
@@ -1457,11 +1457,11 @@ define i32 @cmpxchg_i16_i32_z_with_folded_gep_offset(i16* %p, i32 %exp, i32 %new
 
 ; CHECK-LABEL: cmpxchg_i16_i64_z_with_folded_gep_offset:
 ; CHECK: i64.atomic.rmw16.cmpxchg_u $push0=, 48($0), $1, $2{{$}}
-define i64 @cmpxchg_i16_i64_z_with_folded_gep_offset(i16* %p, i64 %exp, i64 %new) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
+define i64 @cmpxchg_i16_i64_z_with_folded_gep_offset(ptr %p, i64 %exp, i64 %new) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
   %exp_t = trunc i64 %exp to i16
   %new_t = trunc i64 %new to i16
-  %pair = cmpxchg i16* %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
   %old = extractvalue { i16, i1 } %pair, 0
   %u = zext i16 %old to i64
   ret i64 %u
@@ -1474,11 +1474,11 @@ define i64 @cmpxchg_i16_i64_z_with_folded_gep_offset(i16* %p, i64 %exp, i64 %new
 ; CHECK: i32.atomic.rmw8.cmpxchg_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
 define i32 @cmpxchg_i8_i32_z_with_folded_or_offset(i32 %x, i32 %exp, i32 %new) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
   %exp_t = trunc i32 %exp to i8
   %new_t = trunc i32 %new to i8
-  %pair = cmpxchg i8* %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %conv = zext i8 %old to i32
   ret i32 %conv
@@ -1488,11 +1488,11 @@ define i32 @cmpxchg_i8_i32_z_with_folded_or_offset(i32 %x, i32 %exp, i32 %new) {
 ; CHECK: i64.atomic.rmw8.cmpxchg_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}), $1, $2{{$}}
 define i64 @cmpxchg_i8_i64_z_with_folded_or_offset(i32 %x, i64 %exp, i64 %new) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
   %exp_t = trunc i64 %exp to i8
   %new_t = trunc i64 %new to i8
-  %pair = cmpxchg i8* %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %arrayidx, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %conv = zext i8 %old to i64
   ret i64 %conv
@@ -1504,10 +1504,10 @@ define i64 @cmpxchg_i8_i64_z_with_folded_or_offset(i32 %x, i64 %exp, i64 %new) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.atomic.rmw16.cmpxchg_u $push1=, 42($pop0), $0, $1{{$}}
 define i32 @cmpxchg_i16_i32_z_from_numeric_address(i32 %exp, i32 %new) {
-  %s = inttoptr i32 42 to i16*
+  %s = inttoptr i32 42 to ptr
   %exp_t = trunc i32 %exp to i16
   %new_t = trunc i32 %new to i16
-  %pair = cmpxchg i16* %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr %s, i16 %exp_t, i16 %new_t seq_cst seq_cst
   %old = extractvalue { i16, i1 } %pair, 0
   %u = zext i16 %old to i32
   ret i32 %u
@@ -1519,7 +1519,7 @@ define i32 @cmpxchg_i16_i32_z_from_numeric_address(i32 %exp, i32 %new) {
 define i32 @cmpxchg_i8_i32_z_from_global_address(i32 %exp, i32 %new) {
   %exp_t = trunc i32 %exp to i8
   %new_t = trunc i32 %new to i8
-  %pair = cmpxchg i8* @gv8, i8 %exp_t, i8 %new_t seq_cst seq_cst
+  %pair = cmpxchg ptr @gv8, i8 %exp_t, i8 %new_t seq_cst seq_cst
   %old = extractvalue { i8, i1 } %pair, 0
   %u = zext i8 %old to i32
   ret i32 %u
@@ -1529,15 +1529,15 @@ define i32 @cmpxchg_i8_i32_z_from_global_address(i32 %exp, i32 %new) {
 ; Waits: 32-bit
 ;===----------------------------------------------------------------------------
 
-declare i32 @llvm.wasm.memory.atomic.wait32(i32*, i32, i64)
+declare i32 @llvm.wasm.memory.atomic.wait32(ptr, i32, i64)
 
 ; Basic wait.
 
 ; CHECK-LABEL: wait32_no_offset:
 ; CHECK: memory.atomic.wait32 $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @wait32_no_offset(i32* %p, i32 %exp, i64 %timeout) {
-  %v = call i32 @llvm.wasm.memory.atomic.wait32(i32* %p, i32 %exp, i64 %timeout)
+define i32 @wait32_no_offset(ptr %p, i32 %exp, i64 %timeout) {
+  %v = call i32 @llvm.wasm.memory.atomic.wait32(ptr %p, i32 %exp, i64 %timeout)
   ret i32 %v
 }
 
@@ -1545,11 +1545,11 @@ define i32 @wait32_no_offset(i32* %p, i32 %exp, i64 %timeout) {
 
 ; CHECK-LABEL: wait32_with_folded_offset:
 ; CHECK: memory.atomic.wait32 $push0=, 24($0), $1, $2{{$}}
-define i32 @wait32_with_folded_offset(i32* %p, i32 %exp, i64 %timeout) {
-  %q = ptrtoint i32* %p to i32
+define i32 @wait32_with_folded_offset(ptr %p, i32 %exp, i64 %timeout) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = call i32 @llvm.wasm.memory.atomic.wait32(i32* %s, i32 %exp, i64 %timeout)
+  %s = inttoptr i32 %r to ptr
+  %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1557,9 +1557,9 @@ define i32 @wait32_with_folded_offset(i32* %p, i32 %exp, i64 %timeout) {
 
 ; CHECK-LABEL: wait32_with_folded_gep_offset:
 ; CHECK: memory.atomic.wait32 $push0=, 24($0), $1, $2{{$}}
-define i32 @wait32_with_folded_gep_offset(i32* %p, i32 %exp, i64 %timeout) {
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  %t = call i32 @llvm.wasm.memory.atomic.wait32(i32* %s, i32 %exp, i64 %timeout)
+define i32 @wait32_with_folded_gep_offset(ptr %p, i32 %exp, i64 %timeout) {
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1569,9 +1569,9 @@ define i32 @wait32_with_folded_gep_offset(i32* %p, i32 %exp, i64 %timeout) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: memory.atomic.wait32 $push2=, 0($pop1), $1, $2{{$}}
-define i32 @wait32_with_unfolded_gep_negative_offset(i32* %p, i32 %exp, i64 %timeout) {
-  %s = getelementptr inbounds i32, i32* %p, i32 -6
-  %t = call i32 @llvm.wasm.memory.atomic.wait32(i32* %s, i32 %exp, i64 %timeout)
+define i32 @wait32_with_unfolded_gep_negative_offset(ptr %p, i32 %exp, i64 %timeout) {
+  %s = getelementptr inbounds i32, ptr %p, i32 -6
+  %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1581,11 +1581,11 @@ define i32 @wait32_with_unfolded_gep_negative_offset(i32* %p, i32 %exp, i64 %tim
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: memory.atomic.wait32 $push2=, 0($pop1), $1, $2{{$}}
-define i32 @wait32_with_unfolded_offset(i32* %p, i32 %exp, i64 %timeout) {
-  %q = ptrtoint i32* %p to i32
+define i32 @wait32_with_unfolded_offset(ptr %p, i32 %exp, i64 %timeout) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = call i32 @llvm.wasm.memory.atomic.wait32(i32* %s, i32 %exp, i64 %timeout)
+  %s = inttoptr i32 %r to ptr
+  %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1595,9 +1595,9 @@ define i32 @wait32_with_unfolded_offset(i32* %p, i32 %exp, i64 %timeout) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: memory.atomic.wait32 $push2=, 0($pop1), $1, $2{{$}}
-define i32 @wait32_with_unfolded_gep_offset(i32* %p, i32 %exp, i64 %timeout) {
-  %s = getelementptr i32, i32* %p, i32 6
-  %t = call i32 @llvm.wasm.memory.atomic.wait32(i32* %s, i32 %exp, i64 %timeout)
+define i32 @wait32_with_unfolded_gep_offset(ptr %p, i32 %exp, i64 %timeout) {
+  %s = getelementptr i32, ptr %p, i32 6
+  %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1607,8 +1607,8 @@ define i32 @wait32_with_unfolded_gep_offset(i32* %p, i32 %exp, i64 %timeout) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: memory.atomic.wait32 $push1=, 42($pop0), $0, $1{{$}}
 define i32 @wait32_from_numeric_address(i32 %exp, i64 %timeout) {
-  %s = inttoptr i32 42 to i32*
-  %t = call i32 @llvm.wasm.memory.atomic.wait32(i32* %s, i32 %exp, i64 %timeout)
+  %s = inttoptr i32 42 to ptr
+  %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr %s, i32 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1616,7 +1616,7 @@ define i32 @wait32_from_numeric_address(i32 %exp, i64 %timeout) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: memory.atomic.wait32 $push1=, gv($pop0), $0, $1{{$}}
 define i32 @wait32_from_global_address(i32 %exp, i64 %timeout) {
-  %t = call i32 @llvm.wasm.memory.atomic.wait32(i32* @gv, i32 %exp, i64 %timeout)
+  %t = call i32 @llvm.wasm.memory.atomic.wait32(ptr @gv, i32 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1624,15 +1624,15 @@ define i32 @wait32_from_global_address(i32 %exp, i64 %timeout) {
 ; Waits: 64-bit
 ;===----------------------------------------------------------------------------
 
-declare i32 @llvm.wasm.memory.atomic.wait64(i64*, i64, i64)
+declare i32 @llvm.wasm.memory.atomic.wait64(ptr, i64, i64)
 
 ; Basic wait.
 
 ; CHECK-LABEL: wait64_no_offset:
 ; CHECK: memory.atomic.wait64 $push0=, 0($0), $1, $2{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @wait64_no_offset(i64* %p, i64 %exp, i64 %timeout) {
-  %v = call i32 @llvm.wasm.memory.atomic.wait64(i64* %p, i64 %exp, i64 %timeout)
+define i32 @wait64_no_offset(ptr %p, i64 %exp, i64 %timeout) {
+  %v = call i32 @llvm.wasm.memory.atomic.wait64(ptr %p, i64 %exp, i64 %timeout)
   ret i32 %v
 }
 
@@ -1640,11 +1640,11 @@ define i32 @wait64_no_offset(i64* %p, i64 %exp, i64 %timeout) {
 
 ; CHECK-LABEL: wait64_with_folded_offset:
 ; CHECK: memory.atomic.wait64 $push0=, 24($0), $1, $2{{$}}
-define i32 @wait64_with_folded_offset(i64* %p, i64 %exp, i64 %timeout) {
-  %q = ptrtoint i64* %p to i32
+define i32 @wait64_with_folded_offset(ptr %p, i64 %exp, i64 %timeout) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %t = call i32 @llvm.wasm.memory.atomic.wait64(i64* %s, i64 %exp, i64 %timeout)
+  %s = inttoptr i32 %r to ptr
+  %t = call i32 @llvm.wasm.memory.atomic.wait64(ptr %s, i64 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1652,9 +1652,9 @@ define i32 @wait64_with_folded_offset(i64* %p, i64 %exp, i64 %timeout) {
 
 ; CHECK-LABEL: wait64_with_folded_gep_offset:
 ; CHECK: memory.atomic.wait64 $push0=, 24($0), $1, $2{{$}}
-define i32 @wait64_with_folded_gep_offset(i64* %p, i64 %exp, i64 %timeout) {
-  %s = getelementptr inbounds i64, i64* %p, i32 3
-  %t = call i32 @llvm.wasm.memory.atomic.wait64(i64* %s, i64 %exp, i64 %timeout)
+define i32 @wait64_with_folded_gep_offset(ptr %p, i64 %exp, i64 %timeout) {
+  %s = getelementptr inbounds i64, ptr %p, i32 3
+  %t = call i32 @llvm.wasm.memory.atomic.wait64(ptr %s, i64 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1664,9 +1664,9 @@ define i32 @wait64_with_folded_gep_offset(i64* %p, i64 %exp, i64 %timeout) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: memory.atomic.wait64 $push2=, 0($pop1), $1, $2{{$}}
-define i32 @wait64_with_unfolded_gep_negative_offset(i64* %p, i64 %exp, i64 %timeout) {
-  %s = getelementptr inbounds i64, i64* %p, i32 -3
-  %t = call i32 @llvm.wasm.memory.atomic.wait64(i64* %s, i64 %exp, i64 %timeout)
+define i32 @wait64_with_unfolded_gep_negative_offset(ptr %p, i64 %exp, i64 %timeout) {
+  %s = getelementptr inbounds i64, ptr %p, i32 -3
+  %t = call i32 @llvm.wasm.memory.atomic.wait64(ptr %s, i64 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1676,11 +1676,11 @@ define i32 @wait64_with_unfolded_gep_negative_offset(i64* %p, i64 %exp, i64 %tim
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: memory.atomic.wait64 $push2=, 0($pop1), $1, $2{{$}}
-define i32 @wait64_with_unfolded_offset(i64* %p, i64 %exp, i64 %timeout) {
-  %q = ptrtoint i64* %p to i32
+define i32 @wait64_with_unfolded_offset(ptr %p, i64 %exp, i64 %timeout) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %t = call i32 @llvm.wasm.memory.atomic.wait64(i64* %s, i64 %exp, i64 %timeout)
+  %s = inttoptr i32 %r to ptr
+  %t = call i32 @llvm.wasm.memory.atomic.wait64(ptr %s, i64 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1690,9 +1690,9 @@ define i32 @wait64_with_unfolded_offset(i64* %p, i64 %exp, i64 %timeout) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: memory.atomic.wait64 $push2=, 0($pop1), $1, $2{{$}}
-define i32 @wait64_with_unfolded_gep_offset(i64* %p, i64 %exp, i64 %timeout) {
-  %s = getelementptr i64, i64* %p, i32 3
-  %t = call i32 @llvm.wasm.memory.atomic.wait64(i64* %s, i64 %exp, i64 %timeout)
+define i32 @wait64_with_unfolded_gep_offset(ptr %p, i64 %exp, i64 %timeout) {
+  %s = getelementptr i64, ptr %p, i32 3
+  %t = call i32 @llvm.wasm.memory.atomic.wait64(ptr %s, i64 %exp, i64 %timeout)
   ret i32 %t
 }
 
@@ -1700,15 +1700,15 @@ define i32 @wait64_with_unfolded_gep_offset(i64* %p, i64 %exp, i64 %timeout) {
 ; Notifies
 ;===----------------------------------------------------------------------------
 
-declare i32 @llvm.wasm.memory.atomic.notify(i32*, i32)
+declare i32 @llvm.wasm.memory.atomic.notify(ptr, i32)
 
 ; Basic notify.
 
 ; CHECK-LABEL: notify_no_offset:
 ; CHECK: memory.atomic.notify $push0=, 0($0), $1{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @notify_no_offset(i32* %p, i32 %notify_count) {
-  %v = call i32 @llvm.wasm.memory.atomic.notify(i32* %p, i32 %notify_count)
+define i32 @notify_no_offset(ptr %p, i32 %notify_count) {
+  %v = call i32 @llvm.wasm.memory.atomic.notify(ptr %p, i32 %notify_count)
   ret i32 %v
 }
 
@@ -1716,11 +1716,11 @@ define i32 @notify_no_offset(i32* %p, i32 %notify_count) {
 
 ; CHECK-LABEL: notify_with_folded_offset:
 ; CHECK: memory.atomic.notify $push0=, 24($0), $1{{$}}
-define i32 @notify_with_folded_offset(i32* %p, i32 %notify_count) {
-  %q = ptrtoint i32* %p to i32
+define i32 @notify_with_folded_offset(ptr %p, i32 %notify_count) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = call i32 @llvm.wasm.memory.atomic.notify(i32* %s, i32 %notify_count)
+  %s = inttoptr i32 %r to ptr
+  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
   ret i32 %t
 }
 
@@ -1728,9 +1728,9 @@ define i32 @notify_with_folded_offset(i32* %p, i32 %notify_count) {
 
 ; CHECK-LABEL: notify_with_folded_gep_offset:
 ; CHECK: memory.atomic.notify $push0=, 24($0), $1{{$}}
-define i32 @notify_with_folded_gep_offset(i32* %p, i32 %notify_count) {
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  %t = call i32 @llvm.wasm.memory.atomic.notify(i32* %s, i32 %notify_count)
+define i32 @notify_with_folded_gep_offset(ptr %p, i32 %notify_count) {
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
   ret i32 %t
 }
 
@@ -1740,9 +1740,9 @@ define i32 @notify_with_folded_gep_offset(i32* %p, i32 %notify_count) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: memory.atomic.notify $push2=, 0($pop1), $1{{$}}
-define i32 @notify_with_unfolded_gep_negative_offset(i32* %p, i32 %notify_count) {
-  %s = getelementptr inbounds i32, i32* %p, i32 -6
-  %t = call i32 @llvm.wasm.memory.atomic.notify(i32* %s, i32 %notify_count)
+define i32 @notify_with_unfolded_gep_negative_offset(ptr %p, i32 %notify_count) {
+  %s = getelementptr inbounds i32, ptr %p, i32 -6
+  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
   ret i32 %t
 }
 
@@ -1752,11 +1752,11 @@ define i32 @notify_with_unfolded_gep_negative_offset(i32* %p, i32 %notify_count)
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: memory.atomic.notify $push2=, 0($pop1), $1{{$}}
-define i32 @notify_with_unfolded_offset(i32* %p, i32 %notify_count) {
-  %q = ptrtoint i32* %p to i32
+define i32 @notify_with_unfolded_offset(ptr %p, i32 %notify_count) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = call i32 @llvm.wasm.memory.atomic.notify(i32* %s, i32 %notify_count)
+  %s = inttoptr i32 %r to ptr
+  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
   ret i32 %t
 }
 
@@ -1766,9 +1766,9 @@ define i32 @notify_with_unfolded_offset(i32* %p, i32 %notify_count) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add $push1=, $0, $pop0{{$}}
 ; CHECK: memory.atomic.notify $push2=, 0($pop1), $1{{$}}
-define i32 @notify_with_unfolded_gep_offset(i32* %p, i32 %notify_count) {
-  %s = getelementptr i32, i32* %p, i32 6
-  %t = call i32 @llvm.wasm.memory.atomic.notify(i32* %s, i32 %notify_count)
+define i32 @notify_with_unfolded_gep_offset(ptr %p, i32 %notify_count) {
+  %s = getelementptr i32, ptr %p, i32 6
+  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
   ret i32 %t
 }
 
@@ -1778,8 +1778,8 @@ define i32 @notify_with_unfolded_gep_offset(i32* %p, i32 %notify_count) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: memory.atomic.notify $push1=, 42($pop0), $0{{$}}
 define i32 @notify_from_numeric_address(i32 %notify_count) {
-  %s = inttoptr i32 42 to i32*
-  %t = call i32 @llvm.wasm.memory.atomic.notify(i32* %s, i32 %notify_count)
+  %s = inttoptr i32 42 to ptr
+  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr %s, i32 %notify_count)
   ret i32 %t
 }
 
@@ -1787,6 +1787,6 @@ define i32 @notify_from_numeric_address(i32 %notify_count) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: memory.atomic.notify $push1=, gv($pop0), $0{{$}}
 define i32 @notify_from_global_address(i32 %notify_count) {
-  %t = call i32 @llvm.wasm.memory.atomic.notify(i32* @gv, i32 %notify_count)
+  %t = call i32 @llvm.wasm.memory.atomic.notify(ptr @gv, i32 %notify_count)
   ret i32 %t
 }
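
The pattern throughout this atomics file is mechanical: every typed pointer
(i8*, i16*, i32*, i64*) becomes the opaque ptr type, and the access width is
carried entirely by the instruction's explicit value type, which is why none
of the CHECK lines change. A minimal before/after sketch (the function names
here are illustrative only, not part of this commit):

; Before: the pointee type appears both in the pointer type and as the
; operation's value type.
define i32 @sketch_typed(i32* %p) {
  %old = atomicrmw add i32* %p, i32 1 seq_cst
  ret i32 %old
}

; After: opaque ptr drops the redundant pointee; the i32 value type alone
; still selects i32.atomic.rmw.add, so the generated code is unchanged.
define i32 @sketch_opaque(ptr %p) {
  %old = atomicrmw add ptr %p, i32 1 seq_cst
  ret i32 %old
}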

diff --git a/llvm/test/CodeGen/WebAssembly/offset-fastisel.ll b/llvm/test/CodeGen/WebAssembly/offset-fastisel.ll
index bf57f69c3f7d..f94a6a8c313d 100644
--- a/llvm/test/CodeGen/WebAssembly/offset-fastisel.ll
+++ b/llvm/test/CodeGen/WebAssembly/offset-fastisel.ll
@@ -8,9 +8,9 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK: i32.add    $push[[L0:[0-9]+]]=, $0, $1{{$}}
 ; CHECK: i32.const  $push[[L1:[0-9]+]]=, 0{{$}}
 ; CHECK: i32.store8 0($pop[[L0]]), $pop[[L1]]{{$}}
-define void @store_i8_with_variable_gep_offset(i8* %p, i32 %idx) {
-  %s = getelementptr inbounds i8, i8* %p, i32 %idx
-  store i8 0, i8* %s
+define void @store_i8_with_variable_gep_offset(ptr %p, i32 %idx) {
+  %s = getelementptr inbounds i8, ptr %p, i32 %idx
+  store i8 0, ptr %s
   ret void
 }
 
@@ -24,8 +24,8 @@ define void @store_i8_with_variable_gep_offset(i8* %p, i32 %idx) {
 ; CHECK: i32.store8  0($pop[[L4]]), $pop[[L5]]{{$}}
 define hidden void @store_i8_with_array_alloca_gep(i32 %idx) {
   %A = alloca [30 x i8], align 16
-  %s = getelementptr inbounds [30 x i8], [30 x i8]* %A, i32 0, i32 %idx
-  store i8 0, i8* %s, align 1
+  %s = getelementptr inbounds [30 x i8], ptr %A, i32 0, i32 %idx
+  store i8 0, ptr %s, align 1
   ret void
 }
 
@@ -34,25 +34,25 @@ define hidden void @store_i8_with_array_alloca_gep(i32 %idx) {
 ; CHECK: i32.add   $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
 ; CHECK: i32.const $push[[L2:[0-9]+]]=, 0{{$}}
 ; CHECK: i32.store 0($pop[[L1]]), $pop[[L2]]{{$}}
-define void @store_i32_with_unfolded_gep_offset(i32* %p) {
-  %s = getelementptr i32, i32* %p, i32 6
-  store i32 0, i32* %s
+define void @store_i32_with_unfolded_gep_offset(ptr %p) {
+  %s = getelementptr i32, ptr %p, i32 6
+  store i32 0, ptr %s
   ret void
 }
 
 ; CHECK-LABEL: store_i32_with_folded_gep_offset:
 ; CHECK: i32.store 24($0), $pop{{[0-9]+$}}
-define void @store_i32_with_folded_gep_offset(i32* %p) {
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  store i32 0, i32* %s
+define void @store_i32_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  store i32 0, ptr %s
   ret void
 }
 
 ; CHECK-LABEL: load_i32_with_folded_gep_offset:
 ; CHECK: i32.load  $push{{[0-9]+}}=, 24($0){{$}}
-define i32 @load_i32_with_folded_gep_offset(i32* %p) {
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  %t = load i32, i32* %s
+define i32 @load_i32_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  %t = load i32, ptr %s
   ret i32 %t
 }
 
@@ -61,27 +61,27 @@ define i32 @load_i32_with_folded_gep_offset(i32* %p) {
 ; CHECK: i32.add   $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}}
 ; CHECK: i64.const $push[[L2:[0-9]+]]=, 0{{$}}
 ; CHECK: i64.store 0($pop[[L1]]), $pop[[L2]]{{$}}
-define void @store_i64_with_unfolded_gep_offset(i64* %p) {
-  %s = getelementptr i64, i64* %p, i32 3
-  store i64 0, i64* %s
+define void @store_i64_with_unfolded_gep_offset(ptr %p) {
+  %s = getelementptr i64, ptr %p, i32 3
+  store i64 0, ptr %s
   ret void
 }
 
 ; CHECK-LABEL: store_i8_with_folded_gep_offset:
 ; CHECK: i32.store8 24($0), $pop{{[0-9]+$}}
-define void @store_i8_with_folded_gep_offset(i8* %p) {
-  %s = getelementptr inbounds i8, i8* %p, i32 24
-  store i8 0, i8* %s
+define void @store_i8_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i8, ptr %p, i32 24
+  store i8 0, ptr %s
   ret void
 }
 
 ; CHECK-LABEL: load_i8_u_with_folded_offset:
 ; CHECK: i32.load8_u $push{{[0-9]+}}=, 24($0){{$}}
-define i32 @load_i8_u_with_folded_offset(i8* %p) {
-  %q = ptrtoint i8* %p to i32
+define i32 @load_i8_u_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
-  %t = load i8, i8* %s
+  %s = inttoptr i32 %r to ptr
+  %t = load i8, ptr %s
   %u = zext i8 %t to i32
   ret i32 %u
 }
@@ -89,11 +89,11 @@ define i32 @load_i8_u_with_folded_offset(i8* %p) {
 ; TODO: this should be load8_s, need to fold sign-/zero-extend in fast-isel
 ; CHECK-LABEL: load_i8_s_with_folded_offset:
 ; CHECK: i32.load8_u $push{{[0-9]+}}=, 24($0){{$}}
-define i32 @load_i8_s_with_folded_offset(i8* %p) {
-  %q = ptrtoint i8* %p to i32
+define i32 @load_i8_s_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
-  %t = load i8, i8* %s
+  %s = inttoptr i32 %r to ptr
+  %t = load i8, ptr %s
   %u = sext i8 %t to i32
   ret i32 %u
 }
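
As the function names in this file spell out, a constant offset is folded
into the WebAssembly load/store immediate only when the address arithmetic
provably cannot wrap: `getelementptr inbounds` qualifies, as does an
`add nuw` on the raw address, while a plain gep or an `add nsw` keeps an
explicit i32.add. A small sketch of the foldable case (hypothetical function
name):

; inbounds guarantees the address computation does not wrap unsigned, so the
; constant 16 can live in the instruction's offset field:
;   i32.load $push0=, 16($0)
define i32 @sketch_folded_gep(ptr %p) {
  %s = getelementptr inbounds i32, ptr %p, i32 4
  %t = load i32, ptr %s
  ret i32 %t
}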

diff --git a/llvm/test/CodeGen/WebAssembly/offset-folding.ll b/llvm/test/CodeGen/WebAssembly/offset-folding.ll
index 8d5a484dda2c..41b7387efd52 100644
--- a/llvm/test/CodeGen/WebAssembly/offset-folding.ll
+++ b/llvm/test/CodeGen/WebAssembly/offset-folding.ll
@@ -13,16 +13,16 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-NEXT: .functype test0 () -> (i32){{$}}
 ; CHECK-NEXT: i32.const $push0=, x+188{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define dso_local i32* @test0() {
-  ret i32* getelementptr ([0 x i32], [0 x i32]* @x, i32 0, i32 47)
+define dso_local ptr @test0() {
+  ret ptr getelementptr ([0 x i32], ptr @x, i32 0, i32 47)
 }
 
 ; CHECK-LABEL: test1:
 ; CHECK-NEXT: .functype test1 () -> (i32){{$}}
 ; CHECK-NEXT: i32.const $push0=, y+188{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define dso_local i32* @test1() {
-  ret i32* getelementptr ([50 x i32], [50 x i32]* @y, i32 0, i32 47)
+define dso_local ptr @test1() {
+  ret ptr getelementptr ([50 x i32], ptr @y, i32 0, i32 47)
 }
 
 ; Test zero offsets.
@@ -31,16 +31,16 @@ define dso_local i32* @test1() {
 ; CHECK-NEXT: .functype test2 () -> (i32){{$}}
 ; CHECK-NEXT: i32.const $push0=, x{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define dso_local i32* @test2() {
-  ret i32* getelementptr ([0 x i32], [0 x i32]* @x, i32 0, i32 0)
+define dso_local ptr @test2() {
+  ret ptr @x
 }
 
 ; CHECK-LABEL: test3:
 ; CHECK-NEXT: .functype test3 () -> (i32){{$}}
 ; CHECK-NEXT: i32.const $push0=, y{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define dso_local i32* @test3() {
-  ret i32* getelementptr ([50 x i32], [50 x i32]* @y, i32 0, i32 0)
+define dso_local ptr @test3() {
+  ret ptr @y
 }
 
 ; Test negative offsets.
@@ -49,14 +49,14 @@ define dso_local i32* @test3() {
 ; CHECK-NEXT: .functype test4 () -> (i32){{$}}
 ; CHECK-NEXT: i32.const $push0=, x-188{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define dso_local i32* @test4() {
-  ret i32* getelementptr ([0 x i32], [0 x i32]* @x, i32 0, i32 -47)
+define dso_local ptr @test4() {
+  ret ptr getelementptr ([0 x i32], ptr @x, i32 0, i32 -47)
 }
 
 ; CHECK-LABEL: test5:
 ; CHECK-NEXT: .functype test5 () -> (i32){{$}}
 ; CHECK-NEXT: i32.const $push0=, y-188{{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define dso_local i32* @test5() {
-  ret i32* getelementptr ([50 x i32], [50 x i32]* @y, i32 0, i32 -47)
+define dso_local ptr @test5() {
+  ret ptr getelementptr ([50 x i32], ptr @y, i32 0, i32 -47)
 }
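
The zero-offset cases above (test2 and test3) are the one place this file's
update is not line-for-line: under typed pointers the zero-index gep existed
only to cast [0 x i32]* down to i32*, and under opaque pointers there is
nothing to cast, so the constant expression folds to the bare global. Reusing
@x from this test, the two forms are literally the same constant:

; a zero-index gep over an opaque ptr is a no-op constant expression...
define dso_local ptr @sketch_zero_gep() {
  ret ptr getelementptr ([0 x i32], ptr @x, i32 0, i32 0)
}
; ...and it is folded to: ret ptr @x

The negative-offset tests (test4 and test5) keep their geps because they
still encode a real address adjustment, as the x-188 and y-188 CHECK lines
show.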

diff --git a/llvm/test/CodeGen/WebAssembly/offset.ll b/llvm/test/CodeGen/WebAssembly/offset.ll
index 1136dbb99176..0d9fcf05ab1b 100644
--- a/llvm/test/CodeGen/WebAssembly/offset.ll
+++ b/llvm/test/CodeGen/WebAssembly/offset.ll
@@ -13,8 +13,8 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-LABEL: load_i32_no_offset:
 ; CHECK: i32.load $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i32 @load_i32_no_offset(i32 *%p) {
-  %v = load i32, i32* %p
+define i32 @load_i32_no_offset(ptr %p) {
+  %v = load i32, ptr %p
   ret i32 %v
 }
 
@@ -22,11 +22,11 @@ define i32 @load_i32_no_offset(i32 *%p) {
 
 ; CHECK-LABEL: load_i32_with_folded_offset:
 ; CHECK: i32.load  $push0=, 24($0){{$}}
-define i32 @load_i32_with_folded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define i32 @load_i32_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = load i32, i32* %s
+  %s = inttoptr i32 %r to ptr
+  %t = load i32, ptr %s
   ret i32 %t
 }
 
@@ -34,9 +34,9 @@ define i32 @load_i32_with_folded_offset(i32* %p) {
 
 ; CHECK-LABEL: load_i32_with_folded_gep_offset:
 ; CHECK: i32.load  $push0=, 24($0){{$}}
-define i32 @load_i32_with_folded_gep_offset(i32* %p) {
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  %t = load i32, i32* %s
+define i32 @load_i32_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  %t = load i32, ptr %s
   ret i32 %t
 }
 
@@ -46,9 +46,9 @@ define i32 @load_i32_with_folded_gep_offset(i32* %p) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i32.load  $push2=, 0($pop1){{$}}
-define i32 @load_i32_with_unfolded_gep_negative_offset(i32* %p) {
-  %s = getelementptr inbounds i32, i32* %p, i32 -6
-  %t = load i32, i32* %s
+define i32 @load_i32_with_unfolded_gep_negative_offset(ptr %p) {
+  %s = getelementptr inbounds i32, ptr %p, i32 -6
+  %t = load i32, ptr %s
   ret i32 %t
 }
 
@@ -58,11 +58,11 @@ define i32 @load_i32_with_unfolded_gep_negative_offset(i32* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i32.load  $push2=, 0($pop1){{$}}
-define i32 @load_i32_with_unfolded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define i32 @load_i32_with_unfolded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = load i32, i32* %s
+  %s = inttoptr i32 %r to ptr
+  %t = load i32, ptr %s
   ret i32 %t
 }
 
@@ -72,9 +72,9 @@ define i32 @load_i32_with_unfolded_offset(i32* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i32.load  $push2=, 0($pop1){{$}}
-define i32 @load_i32_with_unfolded_gep_offset(i32* %p) {
-  %s = getelementptr i32, i32* %p, i32 6
-  %t = load i32, i32* %s
+define i32 @load_i32_with_unfolded_gep_offset(ptr %p) {
+  %s = getelementptr i32, ptr %p, i32 6
+  %t = load i32, ptr %s
   ret i32 %t
 }
 
@@ -84,8 +84,8 @@ define i32 @load_i32_with_unfolded_gep_offset(i32* %p) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.load  $push1=, 42($pop0){{$}}
 define i32 @load_i32_from_numeric_address() {
-  %s = inttoptr i32 42 to i32*
-  %t = load i32, i32* %s
+  %s = inttoptr i32 42 to ptr
+  %t = load i32, ptr %s
   ret i32 %t
 }
 
@@ -94,7 +94,7 @@ define i32 @load_i32_from_numeric_address() {
 ; CHECK: i32.load  $push1=, gv($pop0){{$}}
 @gv = global i32 0
 define i32 @load_i32_from_global_address() {
-  %t = load i32, i32* @gv
+  %t = load i32, ptr @gv
   ret i32 %t
 }
 
@@ -107,8 +107,8 @@ define i32 @load_i32_from_global_address() {
 ; CHECK-LABEL: load_i64_no_offset:
 ; CHECK: i64.load $push0=, 0($0){{$}}
 ; CHECK-NEXT: return $pop0{{$}}
-define i64 @load_i64_no_offset(i64 *%p) {
-  %v = load i64, i64* %p
+define i64 @load_i64_no_offset(ptr %p) {
+  %v = load i64, ptr %p
   ret i64 %v
 }
 
@@ -116,11 +116,11 @@ define i64 @load_i64_no_offset(i64 *%p) {
 
 ; CHECK-LABEL: load_i64_with_folded_offset:
 ; CHECK: i64.load  $push0=, 24($0){{$}}
-define i64 @load_i64_with_folded_offset(i64* %p) {
-  %q = ptrtoint i64* %p to i32
+define i64 @load_i64_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %t = load i64, i64* %s
+  %s = inttoptr i32 %r to ptr
+  %t = load i64, ptr %s
   ret i64 %t
 }
 
@@ -128,9 +128,9 @@ define i64 @load_i64_with_folded_offset(i64* %p) {
 
 ; CHECK-LABEL: load_i64_with_folded_gep_offset:
 ; CHECK: i64.load  $push0=, 24($0){{$}}
-define i64 @load_i64_with_folded_gep_offset(i64* %p) {
-  %s = getelementptr inbounds i64, i64* %p, i32 3
-  %t = load i64, i64* %s
+define i64 @load_i64_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i64, ptr %p, i32 3
+  %t = load i64, ptr %s
   ret i64 %t
 }
 
@@ -140,9 +140,9 @@ define i64 @load_i64_with_folded_gep_offset(i64* %p) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i64.load  $push2=, 0($pop1){{$}}
-define i64 @load_i64_with_unfolded_gep_negative_offset(i64* %p) {
-  %s = getelementptr inbounds i64, i64* %p, i32 -3
-  %t = load i64, i64* %s
+define i64 @load_i64_with_unfolded_gep_negative_offset(ptr %p) {
+  %s = getelementptr inbounds i64, ptr %p, i32 -3
+  %t = load i64, ptr %s
   ret i64 %t
 }
 
@@ -152,11 +152,11 @@ define i64 @load_i64_with_unfolded_gep_negative_offset(i64* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i64.load  $push2=, 0($pop1){{$}}
-define i64 @load_i64_with_unfolded_offset(i64* %p) {
-  %q = ptrtoint i64* %p to i32
+define i64 @load_i64_with_unfolded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %t = load i64, i64* %s
+  %s = inttoptr i32 %r to ptr
+  %t = load i64, ptr %s
   ret i64 %t
 }
 
@@ -166,9 +166,9 @@ define i64 @load_i64_with_unfolded_offset(i64* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i64.load  $push2=, 0($pop1){{$}}
-define i64 @load_i64_with_unfolded_gep_offset(i64* %p) {
-  %s = getelementptr i64, i64* %p, i32 3
-  %t = load i64, i64* %s
+define i64 @load_i64_with_unfolded_gep_offset(ptr %p) {
+  %s = getelementptr i64, ptr %p, i32 3
+  %t = load i64, ptr %s
   ret i64 %t
 }
 
@@ -182,8 +182,8 @@ define i64 @load_i64_with_unfolded_gep_offset(i64* %p) {
 ; CHECK-NEXT: .functype store_i32_no_offset (i32, i32) -> (){{$}}
 ; CHECK-NEXT: i32.store 0($0), $1{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @store_i32_no_offset(i32 *%p, i32 %v) {
-  store i32 %v, i32* %p
+define void @store_i32_no_offset(ptr %p, i32 %v) {
+  store i32 %v, ptr %p
   ret void
 }
 
@@ -191,11 +191,11 @@ define void @store_i32_no_offset(i32 *%p, i32 %v) {
 
 ; CHECK-LABEL: store_i32_with_folded_offset:
 ; CHECK: i32.store 24($0), $pop0{{$}}
-define void @store_i32_with_folded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define void @store_i32_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  store i32 0, i32* %s
+  %s = inttoptr i32 %r to ptr
+  store i32 0, ptr %s
   ret void
 }
 
@@ -203,9 +203,9 @@ define void @store_i32_with_folded_offset(i32* %p) {
 
 ; CHECK-LABEL: store_i32_with_folded_gep_offset:
 ; CHECK: i32.store 24($0), $pop0{{$}}
-define void @store_i32_with_folded_gep_offset(i32* %p) {
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  store i32 0, i32* %s
+define void @store_i32_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  store i32 0, ptr %s
   ret void
 }
 
@@ -215,9 +215,9 @@ define void @store_i32_with_folded_gep_offset(i32* %p) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i32.store 0($pop1), $pop2{{$}}
-define void @store_i32_with_unfolded_gep_negative_offset(i32* %p) {
-  %s = getelementptr inbounds i32, i32* %p, i32 -6
-  store i32 0, i32* %s
+define void @store_i32_with_unfolded_gep_negative_offset(ptr %p) {
+  %s = getelementptr inbounds i32, ptr %p, i32 -6
+  store i32 0, ptr %s
   ret void
 }
 
@@ -227,11 +227,11 @@ define void @store_i32_with_unfolded_gep_negative_offset(i32* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i32.store 0($pop1), $pop2{{$}}
-define void @store_i32_with_unfolded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define void @store_i32_with_unfolded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  store i32 0, i32* %s
+  %s = inttoptr i32 %r to ptr
+  store i32 0, ptr %s
   ret void
 }
 
@@ -241,9 +241,9 @@ define void @store_i32_with_unfolded_offset(i32* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i32.store 0($pop1), $pop2{{$}}
-define void @store_i32_with_unfolded_gep_offset(i32* %p) {
-  %s = getelementptr i32, i32* %p, i32 6
-  store i32 0, i32* %s
+define void @store_i32_with_unfolded_gep_offset(ptr %p) {
+  %s = getelementptr i32, ptr %p, i32 6
+  store i32 0, ptr %s
   ret void
 }
 
@@ -254,8 +254,8 @@ define void @store_i32_with_unfolded_gep_offset(i32* %p) {
 ; CHECK-NEXT: i32.const $push1=, 0{{$}}
 ; CHECK-NEXT: i32.store 42($pop0), $pop1{{$}}
 define void @store_i32_to_numeric_address() {
-  %s = inttoptr i32 42 to i32*
-  store i32 0, i32* %s
+  %s = inttoptr i32 42 to ptr
+  store i32 0, ptr %s
   ret void
 }
 
@@ -264,7 +264,7 @@ define void @store_i32_to_numeric_address() {
 ; CHECK: i32.const $push1=, 0{{$}}
 ; CHECK: i32.store gv($pop0), $pop1{{$}}
 define void @store_i32_to_global_address() {
-  store i32 0, i32* @gv
+  store i32 0, ptr @gv
   ret void
 }
 
@@ -276,11 +276,11 @@ define void @store_i32_to_global_address() {
 
 ; CHECK-LABEL: store_i64_with_folded_offset:
 ; CHECK: i64.store 24($0), $pop0{{$}}
-define void @store_i64_with_folded_offset(i64* %p) {
-  %q = ptrtoint i64* %p to i32
+define void @store_i64_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  store i64 0, i64* %s
+  %s = inttoptr i32 %r to ptr
+  store i64 0, ptr %s
   ret void
 }
 
@@ -288,9 +288,9 @@ define void @store_i64_with_folded_offset(i64* %p) {
 
 ; CHECK-LABEL: store_i64_with_folded_gep_offset:
 ; CHECK: i64.store 24($0), $pop0{{$}}
-define void @store_i64_with_folded_gep_offset(i64* %p) {
-  %s = getelementptr inbounds i64, i64* %p, i32 3
-  store i64 0, i64* %s
+define void @store_i64_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i64, ptr %p, i32 3
+  store i64 0, ptr %s
   ret void
 }
 
@@ -300,9 +300,9 @@ define void @store_i64_with_folded_gep_offset(i64* %p) {
 ; CHECK: i32.const $push0=, -24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i64.store 0($pop1), $pop2{{$}}
-define void @store_i64_with_unfolded_gep_negative_offset(i64* %p) {
-  %s = getelementptr inbounds i64, i64* %p, i32 -3
-  store i64 0, i64* %s
+define void @store_i64_with_unfolded_gep_negative_offset(ptr %p) {
+  %s = getelementptr inbounds i64, ptr %p, i32 -3
+  store i64 0, ptr %s
   ret void
 }
 
@@ -312,11 +312,11 @@ define void @store_i64_with_unfolded_gep_negative_offset(i64* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i64.store 0($pop1), $pop2{{$}}
-define void @store_i64_with_unfolded_offset(i64* %p) {
-  %q = ptrtoint i64* %p to i32
+define void @store_i64_with_unfolded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  store i64 0, i64* %s
+  %s = inttoptr i32 %r to ptr
+  store i64 0, ptr %s
   ret void
 }
 
@@ -326,9 +326,9 @@ define void @store_i64_with_unfolded_offset(i64* %p) {
 ; CHECK: i32.const $push0=, 24{{$}}
 ; CHECK: i32.add   $push1=, $0, $pop0{{$}}
 ; CHECK: i64.store 0($pop1), $pop2{{$}}
-define void @store_i64_with_unfolded_gep_offset(i64* %p) {
-  %s = getelementptr i64, i64* %p, i32 3
-  store i64 0, i64* %s
+define void @store_i64_with_unfolded_gep_offset(ptr %p) {
+  %s = getelementptr i64, ptr %p, i32 3
+  store i64 0, ptr %s
   ret void
 }
 
@@ -338,9 +338,9 @@ define void @store_i64_with_unfolded_gep_offset(i64* %p) {
 ; CHECK: i32.store8 2($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
 define void @store_i32_with_folded_or_offset(i32 %x) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
-  store i8 0, i8* %arrayidx, align 1
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
+  store i8 0, ptr %arrayidx, align 1
   ret void
 }
 
@@ -352,22 +352,22 @@ define void @store_i32_with_folded_or_offset(i32 %x) {
 
 ; CHECK-LABEL: load_i8_i32_s_with_folded_offset:
 ; CHECK: i32.load8_s $push0=, 24($0){{$}}
-define i32 @load_i8_i32_s_with_folded_offset(i8* %p) {
-  %q = ptrtoint i8* %p to i32
+define i32 @load_i8_i32_s_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
-  %t = load i8, i8* %s
+  %s = inttoptr i32 %r to ptr
+  %t = load i8, ptr %s
   %u = sext i8 %t to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: load_i32_i64_s_with_folded_offset:
 ; CHECK: i64.load32_s $push0=, 24($0){{$}}
-define i64 @load_i32_i64_s_with_folded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define i64 @load_i32_i64_s_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = load i32, i32* %s
+  %s = inttoptr i32 %r to ptr
+  %t = load i32, ptr %s
   %u = sext i32 %t to i64
   ret i64 %u
 }
@@ -376,27 +376,27 @@ define i64 @load_i32_i64_s_with_folded_offset(i32* %p) {
 
 ; CHECK-LABEL: load_i8_i32_s_with_folded_gep_offset:
 ; CHECK: i32.load8_s $push0=, 24($0){{$}}
-define i32 @load_i8_i32_s_with_folded_gep_offset(i8* %p) {
-  %s = getelementptr inbounds i8, i8* %p, i32 24
-  %t = load i8, i8* %s
+define i32 @load_i8_i32_s_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i8, ptr %p, i32 24
+  %t = load i8, ptr %s
   %u = sext i8 %t to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: load_i16_i32_s_with_folded_gep_offset:
 ; CHECK: i32.load16_s $push0=, 48($0){{$}}
-define i32 @load_i16_i32_s_with_folded_gep_offset(i16* %p) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
-  %t = load i16, i16* %s
+define i32 @load_i16_i32_s_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
+  %t = load i16, ptr %s
   %u = sext i16 %t to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: load_i16_i64_s_with_folded_gep_offset:
 ; CHECK: i64.load16_s $push0=, 48($0){{$}}
-define i64 @load_i16_i64_s_with_folded_gep_offset(i16* %p) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
-  %t = load i16, i16* %s
+define i64 @load_i16_i64_s_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
+  %t = load i16, ptr %s
   %u = sext i16 %t to i64
   ret i64 %u
 }
@@ -408,9 +408,9 @@ define i64 @load_i16_i64_s_with_folded_gep_offset(i16* %p) {
 ; CHECK: i32.load8_s $push{{[0-9]+}}=, 2($pop{{[0-9]+}}){{$}}
 define i32 @load_i8_i32_s_with_folded_or_offset(i32 %x) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
-  %t1 = load i8, i8* %arrayidx
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
+  %t1 = load i8, ptr %arrayidx
   %conv = sext i8 %t1 to i32
   ret i32 %conv
 }
@@ -419,9 +419,9 @@ define i32 @load_i8_i32_s_with_folded_or_offset(i32 %x) {
 ; CHECK: i64.load8_s $push{{[0-9]+}}=, 2($pop{{[0-9]+}}){{$}}
 define i64 @load_i8_i64_s_with_folded_or_offset(i32 %x) {
   %and = and i32 %x, -4
-  %t0 = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
-  %t1 = load i8, i8* %arrayidx
+  %t0 = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %t0, i32 2
+  %t1 = load i8, ptr %arrayidx
   %conv = sext i8 %t1 to i64
   ret i64 %conv
 }
@@ -432,8 +432,8 @@ define i64 @load_i8_i64_s_with_folded_or_offset(i32 %x) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.load16_s  $push1=, 42($pop0){{$}}
 define i32 @load_i16_i32_s_from_numeric_address() {
-  %s = inttoptr i32 42 to i16*
-  %t = load i16, i16* %s
+  %s = inttoptr i32 42 to ptr
+  %t = load i16, ptr %s
   %u = sext i16 %t to i32
   ret i32 %u
 }
@@ -443,7 +443,7 @@ define i32 @load_i16_i32_s_from_numeric_address() {
 ; CHECK: i32.load8_s  $push1=, gv8($pop0){{$}}
 @gv8 = global i8 0
 define i32 @load_i8_i32_s_from_global_address() {
-  %t = load i8, i8* @gv8
+  %t = load i8, ptr @gv8
   %u = sext i8 %t to i32
   ret i32 %u
 }
@@ -456,22 +456,22 @@ define i32 @load_i8_i32_s_from_global_address() {
 
 ; CHECK-LABEL: load_i8_i32_z_with_folded_offset:
 ; CHECK: i32.load8_u $push0=, 24($0){{$}}
-define i32 @load_i8_i32_z_with_folded_offset(i8* %p) {
-  %q = ptrtoint i8* %p to i32
+define i32 @load_i8_i32_z_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
-  %t = load i8, i8* %s
+  %s = inttoptr i32 %r to ptr
+  %t = load i8, ptr %s
   %u = zext i8 %t to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: load_i32_i64_z_with_folded_offset:
 ; CHECK: i64.load32_u $push0=, 24($0){{$}}
-define i64 @load_i32_i64_z_with_folded_offset(i32* %p) {
-  %q = ptrtoint i32* %p to i32
+define i64 @load_i32_i64_z_with_folded_offset(ptr %p) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %t = load i32, i32* %s
+  %s = inttoptr i32 %r to ptr
+  %t = load i32, ptr %s
   %u = zext i32 %t to i64
   ret i64 %u
 }
@@ -480,27 +480,27 @@ define i64 @load_i32_i64_z_with_folded_offset(i32* %p) {
 
 ; CHECK-LABEL: load_i8_i32_z_with_folded_gep_offset:
 ; CHECK: i32.load8_u $push0=, 24($0){{$}}
-define i32 @load_i8_i32_z_with_folded_gep_offset(i8* %p) {
-  %s = getelementptr inbounds i8, i8* %p, i32 24
-  %t = load i8, i8* %s
+define i32 @load_i8_i32_z_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i8, ptr %p, i32 24
+  %t = load i8, ptr %s
   %u = zext i8 %t to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: load_i16_i32_z_with_folded_gep_offset:
 ; CHECK: i32.load16_u $push0=, 48($0){{$}}
-define i32 @load_i16_i32_z_with_folded_gep_offset(i16* %p) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
-  %t = load i16, i16* %s
+define i32 @load_i16_i32_z_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
+  %t = load i16, ptr %s
   %u = zext i16 %t to i32
   ret i32 %u
 }
 
 ; CHECK-LABEL: load_i16_i64_z_with_folded_gep_offset:
 ; CHECK: i64.load16_u $push0=, 48($0){{$}}
-define i64 @load_i16_i64_z_with_folded_gep_offset(i16* %p) {
-  %s = getelementptr inbounds i16, i16* %p, i64 24
-  %t = load i16, i16* %s
+define i64 @load_i16_i64_z_with_folded_gep_offset(ptr %p) {
+  %s = getelementptr inbounds i16, ptr %p, i64 24
+  %t = load i16, ptr %s
   %u = zext i16 %t to i64
   ret i64 %u
 }
@@ -511,8 +511,8 @@ define i64 @load_i16_i64_z_with_folded_gep_offset(i16* %p) {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.load16_u  $push1=, 42($pop0){{$}}
 define i32 @load_i16_i32_z_from_numeric_address() {
-  %s = inttoptr i32 42 to i16*
-  %t = load i16, i16* %s
+  %s = inttoptr i32 42 to ptr
+  %t = load i16, ptr %s
   %u = zext i16 %t to i32
   ret i32 %u
 }
@@ -521,7 +521,7 @@ define i32 @load_i16_i32_z_from_numeric_address() {
 ; CHECK: i32.const $push0=, 0{{$}}
 ; CHECK: i32.load8_u  $push1=, gv8($pop0){{$}}
 define i32 @load_i8_i32_z_from_global_address() {
-  %t = load i8, i8* @gv8
+  %t = load i8, ptr @gv8
   %u = zext i8 %t to i32
   ret i32 %u
 }
@@ -530,8 +530,8 @@ define i32 @load_i8_i32_z_from_global_address() {
 ; CHECK-LABEL: load_i8_i32_retvalue:
 ; CHECK: i32.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
-define i8 @load_i8_i32_retvalue(i8 *%p) {
-  %v = load i8, i8* %p
+define i8 @load_i8_i32_retvalue(ptr %p) {
+  %v = load i8, ptr %p
   ret i8 %v
 }
 
@@ -543,23 +543,23 @@ define i8 @load_i8_i32_retvalue(i8 *%p) {
 
 ; CHECK-LABEL: store_i8_i32_with_folded_offset:
 ; CHECK: i32.store8 24($0), $1{{$}}
-define void @store_i8_i32_with_folded_offset(i8* %p, i32 %v) {
-  %q = ptrtoint i8* %p to i32
+define void @store_i8_i32_with_folded_offset(ptr %p, i32 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
+  %s = inttoptr i32 %r to ptr
   %t = trunc i32 %v to i8
-  store i8 %t, i8* %s
+  store i8 %t, ptr %s
   ret void
 }
 
 ; CHECK-LABEL: store_i32_i64_with_folded_offset:
 ; CHECK: i64.store32 24($0), $1{{$}}
-define void @store_i32_i64_with_folded_offset(i32* %p, i64 %v) {
-  %q = ptrtoint i32* %p to i32
+define void @store_i32_i64_with_folded_offset(ptr %p, i64 %v) {
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
+  %s = inttoptr i32 %r to ptr
   %t = trunc i64 %v to i32
-  store i32 %t, i32* %s
+  store i32 %t, ptr %s
   ret void
 }
 
@@ -567,28 +567,28 @@ define void @store_i32_i64_with_folded_offset(i32* %p, i64 %v) {
 
 ; CHECK-LABEL: store_i8_i32_with_folded_gep_offset:
 ; CHECK: i32.store8 24($0), $1{{$}}
-define void @store_i8_i32_with_folded_gep_offset(i8* %p, i32 %v) {
-  %s = getelementptr inbounds i8, i8* %p, i32 24
+define void @store_i8_i32_with_folded_gep_offset(ptr %p, i32 %v) {
+  %s = getelementptr inbounds i8, ptr %p, i32 24
   %t = trunc i32 %v to i8
-  store i8 %t, i8* %s
+  store i8 %t, ptr %s
   ret void
 }
 
 ; CHECK-LABEL: store_i16_i32_with_folded_gep_offset:
 ; CHECK: i32.store16 48($0), $1{{$}}
-define void @store_i16_i32_with_folded_gep_offset(i16* %p, i32 %v) {
-  %s = getelementptr inbounds i16, i16* %p, i32 24
+define void @store_i16_i32_with_folded_gep_offset(ptr %p, i32 %v) {
+  %s = getelementptr inbounds i16, ptr %p, i32 24
   %t = trunc i32 %v to i16
-  store i16 %t, i16* %s
+  store i16 %t, ptr %s
   ret void
 }
 
 ; CHECK-LABEL: store_i16_i64_with_folded_gep_offset:
 ; CHECK: i64.store16 48($0), $1{{$}}
-define void @store_i16_i64_with_folded_gep_offset(i16* %p, i64 %v) {
-  %s = getelementptr inbounds i16, i16* %p, i64 24
+define void @store_i16_i64_with_folded_gep_offset(ptr %p, i64 %v) {
+  %s = getelementptr inbounds i16, ptr %p, i64 24
   %t = trunc i64 %v to i16
-  store i16 %t, i16* %s
+  store i16 %t, ptr %s
   ret void
 }
 
@@ -599,10 +599,10 @@ define void @store_i16_i64_with_folded_gep_offset(i16* %p, i64 %v) {
 ; CHECK: i32.store8 2($pop{{[0-9]+}}), $1{{$}}
 define void @store_i8_i32_with_folded_or_offset(i32 %x, i32 %v) {
   %and = and i32 %x, -4
-  %p = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
+  %p = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 2
   %t = trunc i32 %v to i8
-  store i8 %t, i8* %arrayidx
+  store i8 %t, ptr %arrayidx
   ret void
 }
 
@@ -610,10 +610,10 @@ define void @store_i8_i32_with_folded_or_offset(i32 %x, i32 %v) {
 ; CHECK: i64.store8 2($pop{{[0-9]+}}), $1{{$}}
 define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
   %and = and i32 %x, -4
-  %p = inttoptr i32 %and to i8*
-  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
+  %p = inttoptr i32 %and to ptr
+  %arrayidx = getelementptr inbounds i8, ptr %p, i32 2
   %t = trunc i64 %v to i8
-  store i8 %t, i8* %arrayidx
+  store i8 %t, ptr %arrayidx
   ret void
 }
 
@@ -632,10 +632,10 @@ define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
 ; CHECK: i32.store 8($1), $4{{$}}
 ; CHECK: i32.store 4($1), $3{{$}}
 ; CHECK: i32.store 0($1), $2{{$}}
-define void @aggregate_load_store({i32,i32,i32,i32}* %p, {i32,i32,i32,i32}* %q) {
+define void @aggregate_load_store(ptr %p, ptr %q) {
   ; volatile so that things stay in order for the tests above
-  %t = load volatile {i32,i32,i32,i32}, {i32, i32,i32,i32}* %p
-  store volatile {i32,i32,i32,i32} %t, {i32, i32,i32,i32}* %q
+  %t = load volatile {i32,i32,i32,i32}, ptr %p
+  store volatile {i32,i32,i32,i32} %t, ptr %q
   ret void
 }
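
One thing offset.ll makes visible: with opaque pointers the loaded or stored value type lives only on the instruction (load i32, ptr %p), and the element type used for address arithmetic lives only on the GEP, so the backend's offset folding takes its scale from the GEP. A short sketch of the arithmetic behind load_i32_with_folded_gep_offset follows; the function name is hypothetical.

define i32 @gep_fold_sketch(ptr %p) {
  ; i32 elements are 4 bytes wide, so index 6 is a constant byte offset
  ; of 24, which ISel folds into the load's offset immediate:
  ;   i32.load $push0=, 24($0)
  %s = getelementptr inbounds i32, ptr %p, i32 6
  %t = load i32, ptr %s
  ret i32 %t
}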
 

diff  --git a/llvm/test/CodeGen/WebAssembly/only-data.ll b/llvm/test/CodeGen/WebAssembly/only-data.ll
index bf4979d02252..075feece36b9 100644
--- a/llvm/test/CodeGen/WebAssembly/only-data.ll
+++ b/llvm/test/CodeGen/WebAssembly/only-data.ll
@@ -4,7 +4,7 @@
 ;      CHECK: .type foo,@object
 ; CHECK-NEXT: .section .data.foo,"",@
 ; CHECK-NEXT: .globl foo
-@foo = local_unnamed_addr global i32 (i32)* @bar, align 4
+@foo = local_unnamed_addr global ptr @bar, align 4
 
 ; CHECK-LABEL: foo:
 ;  CHECK-NEXT: .int32 bar

diff  --git a/llvm/test/CodeGen/WebAssembly/pr47375.ll b/llvm/test/CodeGen/WebAssembly/pr47375.ll
index 4934c4de0541..400380fb2077 100644
--- a/llvm/test/CodeGen/WebAssembly/pr47375.ll
+++ b/llvm/test/CodeGen/WebAssembly/pr47375.ll
@@ -28,8 +28,8 @@ define void @sext_vec() {
 ; CHECK-NEXT:    i32.and
 ; CHECK-NEXT:    i32.store16 0
 ; CHECK-NEXT:    # fallthrough-return
-  %L1 = load <2 x i3>, <2 x i3>* undef, align 2
+  %L1 = load <2 x i3>, ptr undef, align 2
   %zext = zext <2 x i3> %L1 to <2 x i10>
-  store <2 x i10> %zext, <2 x i10>* undef, align 4
+  store <2 x i10> %zext, ptr undef, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/pr51651.ll b/llvm/test/CodeGen/WebAssembly/pr51651.ll
index 70ddcf07dc8e..56aa99b97f3f 100644
--- a/llvm/test/CodeGen/WebAssembly/pr51651.ll
+++ b/llvm/test/CodeGen/WebAssembly/pr51651.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -O0 -mtriple=wasm32-unknown-unknown -wasm-disable-explicit-locals -wasm-keep-registers < %s | FileCheck %s
 
-define i32 @test(i8* %p, i8* %p2) {
+define i32 @test(ptr %p, ptr %p2) {
 ; CHECK-LABEL: test:
 ; CHECK:         .functype test (i32, i32) -> (i32)
 ; CHECK-NEXT:  # %bb.0:
@@ -20,12 +20,12 @@ define i32 @test(i8* %p, i8* %p2) {
 ; CHECK-NEXT:    end_block # label0:
 ; CHECK-NEXT:    i32.const $7=, 1
 ; CHECK-NEXT:    return $7
-  %v = load i8, i8* %p
+  %v = load i8, ptr %p
   %v.ext = zext i8 %v to i32
   %cond = icmp eq i32 %v.ext, 0
   ; Cause FastISel abort.
   %shl = shl i8 %v, 0
-  store i8 %shl, i8* %p2
+  store i8 %shl, ptr %p2
   br label %bb2
 
 bb2:

diff  --git a/llvm/test/CodeGen/WebAssembly/pr58904.ll b/llvm/test/CodeGen/WebAssembly/pr58904.ll
index b8d5e397be02..5fc32ff8701c 100644
--- a/llvm/test/CodeGen/WebAssembly/pr58904.ll
+++ b/llvm/test/CodeGen/WebAssembly/pr58904.ll
@@ -14,7 +14,7 @@ define i64 @PR58904() {
 ; CHECK-NEXT:    # fallthrough-return
 BB:
   %A = alloca i64
-  %C2 = ptrtoint i64* %A to i64
+  %C2 = ptrtoint ptr %A to i64
   %B2 = urem i64 %C2, -1
   ret i64 %B2
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/reg-stackify.ll b/llvm/test/CodeGen/WebAssembly/reg-stackify.ll
index e06a1dcc0071..e3b49c3faa11 100644
--- a/llvm/test/CodeGen/WebAssembly/reg-stackify.ll
+++ b/llvm/test/CodeGen/WebAssembly/reg-stackify.ll
@@ -14,9 +14,9 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK: return $1{{$}}
 ; NOREGS-LABEL: no0:
 ; NOREGS: return{{$}}
-define i32 @no0(i32* %p, i32* %q) {
-  %t = load i32, i32* %q
-  store i32 0, i32* %p
+define i32 @no0(ptr %p, ptr %q) {
+  %t = load i32, ptr %q
+  store i32 0, ptr %p
   ret i32 %t
 }
 
@@ -26,9 +26,9 @@ define i32 @no0(i32* %p, i32* %q) {
 ; CHECK: return $1{{$}}
 ; NOREGS-LABEL: no1:
 ; NOREGS: return{{$}}
-define i32 @no1(i32* %p, i32* dereferenceable(4) %q) {
-  %t = load volatile i32, i32* %q, !invariant.load !0
-  store volatile i32 0, i32* %p
+define i32 @no1(ptr %p, ptr dereferenceable(4) %q) {
+  %t = load volatile i32, ptr %q, !invariant.load !0
+  store volatile i32 0, ptr %p
   ret i32 %t
 }
 
@@ -38,9 +38,9 @@ define i32 @no1(i32* %p, i32* dereferenceable(4) %q) {
 ; CHECK: return $pop{{[0-9]+}}{{$}}
 ; NOREGS-LABEL: yes0:
 ; NOREGS: return{{$}}
-define i32 @yes0(i32* %p, i32* dereferenceable(4) %q) {
-  %t = load i32, i32* %q, !invariant.load !0
-  store i32 0, i32* %p
+define i32 @yes0(ptr %p, ptr dereferenceable(4) %q) {
+  %t = load i32, ptr %q, !invariant.load !0
+  store i32 0, ptr %p
   ret i32 %t
 }
 
@@ -50,8 +50,8 @@ define i32 @yes0(i32* %p, i32* dereferenceable(4) %q) {
 ; CHECK: return $pop0{{$}}
 ; NOREGS-LABEL: yes1:
 ; NOREGS: return{{$}}
-define i32 @yes1(i32* %q) {
-  %t = load volatile i32, i32* %q
+define i32 @yes1(ptr %q) {
+  %t = load volatile i32, ptr %q
   ret i32 %t
 }
 
@@ -61,9 +61,9 @@ define i32 @yes1(i32* %q) {
 ; CHECK: return $pop{{[0-9]+}}{{$}}
 ; NOREGS-LABEL: sink_trap:
 ; NOREGS: return{{$}}
-define i32 @sink_trap(i32 %x, i32 %y, i32* %p) {
+define i32 @sink_trap(i32 %x, i32 %y, ptr %p) {
   %t = sdiv i32 %x, %y
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   ret i32 %t
 }
 
@@ -74,9 +74,9 @@ define i32 @sink_trap(i32 %x, i32 %y, i32* %p) {
 ; NOREGS-LABEL: sink_readnone_call:
 ; NOREGS: return{{$}}
 declare i32 @readnone_callee() readnone nounwind
-define i32 @sink_readnone_call(i32 %x, i32 %y, i32* %p) {
+define i32 @sink_readnone_call(i32 %x, i32 %y, ptr %p) {
   %t = call i32 @readnone_callee()
-  store volatile i32 0, i32* %p
+  store volatile i32 0, ptr %p
   ret i32 %t
 }
 
@@ -87,9 +87,9 @@ define i32 @sink_readnone_call(i32 %x, i32 %y, i32* %p) {
 ; NOREGS-LABEL: no_sink_readonly_call:
 ; NOREGS: return{{$}}
 declare i32 @readonly_callee() readonly nounwind
-define i32 @no_sink_readonly_call(i32 %x, i32 %y, i32* %p) {
+define i32 @no_sink_readonly_call(i32 %x, i32 %y, ptr %p) {
   %t = call i32 @readonly_callee()
-  store i32 0, i32* %p
+  store i32 0, ptr %p
   ret i32 %t
 }
 
@@ -196,20 +196,20 @@ false:
 ; NOREGS-NEXT: .LBB{{[0-9]+}}_3:
 ; NOREGS-NEXT: end_block{{$}}
 ; NOREGS-NEXT: return{{$}}
-define void @multiple_uses(i32* %arg0, i32* %arg1, i32* %arg2) nounwind {
+define void @multiple_uses(ptr %arg0, ptr %arg1, ptr %arg2) nounwind {
 bb:
   br label %loop
 
 loop:
-  %tmp7 = load i32, i32* %arg2
-  %tmp8 = inttoptr i32 %tmp7 to i32*
-  %tmp9 = icmp uge i32* %tmp8, %arg1
-  %tmp10 = icmp ult i32* %tmp8, %arg0
+  %tmp7 = load i32, ptr %arg2
+  %tmp8 = inttoptr i32 %tmp7 to ptr
+  %tmp9 = icmp uge ptr %tmp8, %arg1
+  %tmp10 = icmp ult ptr %tmp8, %arg0
   %tmp11 = or i1 %tmp9, %tmp10
   br i1 %tmp11, label %back, label %then
 
 then:
-  store i32 %tmp7, i32* %arg2
+  store i32 %tmp7, ptr %arg2
   br label %back
 
 back:
@@ -232,11 +232,11 @@ return:
 ; NOREGS:      store
 ; NOREGS-NEXT: call
 declare void @evoke_side_effects()
-define hidden void @stackify_store_across_side_effects(double* nocapture %d) {
+define hidden void @stackify_store_across_side_effects(ptr nocapture %d) {
 entry:
-  store double 2.0, double* %d
+  store double 2.0, ptr %d
   call void @evoke_side_effects()
-  store double 2.0, double* %d
+  store double 2.0, ptr %d
   call void @evoke_side_effects()
   ret void
 }
@@ -526,7 +526,7 @@ exit:
 @count = hidden global i32 0, align 4
 define i32 @no_stackify_call_past_load() {
   %a = call i32 @red()
-  %b = load i32, i32* @count, align 4
+  %b = load i32, ptr @count, align 4
   call i32 @callee(i32 %a)
   ret i32 %b
   ; use of a
@@ -541,9 +541,9 @@ define i32 @no_stackify_call_past_load() {
 ; NOREGS: i32.store 0
 ; NOREGS: i32.load 0
 ; NOREGS: call callee{{$}}
-define i32 @no_stackify_store_past_load(i32 %a, i32* %p1, i32* %p2) {
-  store i32 %a, i32* %p1
-  %b = load i32, i32* %p2, align 4
+define i32 @no_stackify_store_past_load(i32 %a, ptr %p1, ptr %p2) {
+  store i32 %a, ptr %p1
+  %b = load i32, ptr %p2, align 4
   call i32 @callee(i32 %a)
   ret i32 %b
 }
@@ -559,9 +559,9 @@ define i32 @no_stackify_store_past_load(i32 %a, i32* %p1, i32* %p2) {
 ; NOREGS: call callee
 ; NOREGS: i32.load 0
 ; NOREGS: return
-define i32 @store_past_invar_load(i32 %a, i32* %p1, i32* dereferenceable(4) %p2) {
-  store i32 %a, i32* %p1
-  %b = load i32, i32* %p2, !invariant.load !0
+define i32 @store_past_invar_load(i32 %a, ptr %p1, ptr dereferenceable(4) %p2) {
+  store i32 %a, ptr %p1
+  %b = load i32, ptr %p2, !invariant.load !0
   call i32 @callee(i32 %a)
   ret i32 %b
 }
@@ -585,10 +585,10 @@ define void @ignore_dbg_value() {
 ; CHECK: return ${{[0-9]+}}{{$}}
 ; NOREGS-LABEL: no_stackify_past_epilogue:
 ; NOREGS: return{{$}}
-declare i32 @use_memory(i32*)
+declare i32 @use_memory(ptr)
 define i32 @no_stackify_past_epilogue() {
   %x = alloca i32
-  %call = call i32 @use_memory(i32* %x)
+  %call = call i32 @use_memory(ptr %x)
   ret i32 %call
 }
 
@@ -604,15 +604,15 @@ define i32 @no_stackify_past_epilogue() {
 ; NOREGS-NEXT:        i32.add
 ; NOREGS-NEXT:        local.tee   2{{$}}
 ; NOREGS-NEXT:        i32.ne
-define void @stackify_indvar(i32 %tmp, i32* %v) #0 {
+define void @stackify_indvar(i32 %tmp, ptr %v) #0 {
 bb:
   br label %bb3
 
 bb3:                                              ; preds = %bb3, %bb2
   %tmp4 = phi i32 [ %tmp7, %bb3 ], [ 0, %bb ]
-  %tmp5 = load volatile i32, i32* %v, align 4
+  %tmp5 = load volatile i32, ptr %v, align 4
   %tmp6 = add nsw i32 %tmp5, %tmp4
-  store volatile i32 %tmp6, i32* %v, align 4
+  store volatile i32 %tmp6, ptr %v, align 4
   %tmp7 = add nuw nsw i32 %tmp4, 1
   %tmp8 = icmp eq i32 %tmp7, %tmp
   br i1 %tmp8, label %bb10, label %bb3
@@ -629,11 +629,11 @@ bb10:                                             ; preds = %bb9, %bb
 ; NOREGS-LABEL: stackpointer_dependency:
 ; NOREGS:      call stackpointer_callee
 ; NOREGS:      global.set __stack_pointer
-declare i32 @stackpointer_callee(i8* readnone, i8* readnone) nounwind readnone
-declare i8* @llvm.frameaddress(i32)
-define i32 @stackpointer_dependency(i8* readnone) {
-  %2 = tail call i8* @llvm.frameaddress(i32 0)
-  %3 = tail call i32 @stackpointer_callee(i8* %0, i8* %2)
+declare i32 @stackpointer_callee(ptr readnone, ptr readnone) nounwind readnone
+declare ptr @llvm.frameaddress(i32)
+define i32 @stackpointer_dependency(ptr readnone) {
+  %2 = tail call ptr @llvm.frameaddress(i32 0)
+  %3 = tail call i32 @stackpointer_callee(ptr %0, ptr %2)
   ret i32 %3
 }
 
@@ -651,14 +651,12 @@ define i32 @stackpointer_dependency(i8* readnone) {
 ; NOREGS:      i32.load  0
 ; NOREGS-NEXT: i32.load  0
 ; NOREGS-NEXT: call_indirect (i32, i32) -> (i32)
-%class.call_indirect = type { i32 (...)** }
-define i32 @call_indirect_stackify(%class.call_indirect** %objptr, i32 %arg) {
-  %obj = load %class.call_indirect*, %class.call_indirect** %objptr
-  %addr = bitcast %class.call_indirect* %obj to i32(%class.call_indirect*, i32)***
-  %vtable = load i32(%class.call_indirect*, i32)**, i32(%class.call_indirect*, i32)*** %addr
-  %vfn = getelementptr inbounds i32(%class.call_indirect*, i32)*, i32(%class.call_indirect*, i32)** %vtable, i32 0
-  %f = load i32(%class.call_indirect*, i32)*, i32(%class.call_indirect*, i32)** %vfn
-  %ret = call i32 %f(%class.call_indirect* %obj, i32 %arg)
+%class.call_indirect = type { ptr }
+define i32 @call_indirect_stackify(ptr %objptr, i32 %arg) {
+  %obj = load ptr, ptr %objptr
+  %vtable = load ptr, ptr %obj
+  %f = load ptr, ptr %vtable
+  %ret = call i32 %f(ptr %obj, i32 %arg)
   ret i32 %ret
 }
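
call_indirect_stackify is the one function in this batch where the conversion shrinks the IR rather than just respelling it: the bitcast and the nested function-pointer types are gone, the zero-index GEP for the vtable slot folds away, and the virtual dispatch reduces to three plain pointer loads. Here is the converted shape restated with comments, using the same names as the test.

%class.call_indirect = type { ptr }          ; vtable pointer is just ptr
define i32 @call_indirect_stackify(ptr %objptr, i32 %arg) {
  %obj = load ptr, ptr %objptr               ; object pointer
  %vtable = load ptr, ptr %obj               ; vtable (first field)
  %f = load ptr, ptr %vtable                 ; slot 0: the virtual function
  %ret = call i32 %f(ptr %obj, i32 %arg)     ; signature supplied at the call
  ret i32 %ret
}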
 

diff  --git a/llvm/test/CodeGen/WebAssembly/return-address-emscripten.ll b/llvm/test/CodeGen/WebAssembly/return-address-emscripten.ll
index 712f4aaf3235..ee8110e36727 100644
--- a/llvm/test/CodeGen/WebAssembly/return-address-emscripten.ll
+++ b/llvm/test/CodeGen/WebAssembly/return-address-emscripten.ll
@@ -8,10 +8,10 @@
 ; CHECK-NEXT: {{^}} i32.const 0{{$}}
 ; CHECK-NEXT: {{^}} call emscripten_return_address{{$}}
 ; CHECK-NEXT: {{^}} end_function{{$}}
-define i8* @test_returnaddress() {
-  %r = call i8* @llvm.returnaddress(i32 0)
-  ret i8* %r
+define ptr @test_returnaddress() {
+  %r = call ptr @llvm.returnaddress(i32 0)
+  ret ptr %r
 }
 
 ; LLVM represents __builtin_return_address as call to this function in IR.
-declare i8* @llvm.returnaddress(i32 immarg)
+declare ptr @llvm.returnaddress(i32 immarg)

diff  --git a/llvm/test/CodeGen/WebAssembly/return-address-unknown.ll b/llvm/test/CodeGen/WebAssembly/return-address-unknown.ll
index bb89650e7099..a2df0c189f29 100644
--- a/llvm/test/CodeGen/WebAssembly/return-address-unknown.ll
+++ b/llvm/test/CodeGen/WebAssembly/return-address-unknown.ll
@@ -6,10 +6,10 @@ target triple = "wasm32-unknown-unknown"
 ; Since this is not implemented, it should fail.
 
 ; CHECK: Non-Emscripten WebAssembly hasn't implemented __builtin_return_address
-define i8* @test_returnaddress() {
-  %r = call i8* @llvm.returnaddress(i32 0)
-  ret i8* %r
+define ptr @test_returnaddress() {
+  %r = call ptr @llvm.returnaddress(i32 0)
+  ret ptr %r
 }
 
 ; LLVM represents __builtin_return_address as call to this function in IR.
-declare i8* @llvm.returnaddress(i32 immarg)
+declare ptr @llvm.returnaddress(i32 immarg)

diff  --git a/llvm/test/CodeGen/WebAssembly/return-int32.ll b/llvm/test/CodeGen/WebAssembly/return-int32.ll
index 590ee1de16d2..32fbc10d412f 100644
--- a/llvm/test/CodeGen/WebAssembly/return-int32.ll
+++ b/llvm/test/CodeGen/WebAssembly/return-int32.ll
@@ -23,10 +23,10 @@ define i32 @return_i32_twice(i32 %a) {
   br i1 %b, label %true, label %false
 
 true:
-  store i32 0, i32* null
+  store i32 0, ptr null
   ret i32 1
 
 false:
-  store i32 2, i32* null
+  store i32 2, ptr null
   ret i32 3
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/return-void.ll b/llvm/test/CodeGen/WebAssembly/return-void.ll
index 4e38ef207e27..7f2ef0916684 100644
--- a/llvm/test/CodeGen/WebAssembly/return-void.ll
+++ b/llvm/test/CodeGen/WebAssembly/return-void.ll
@@ -19,10 +19,10 @@ define void @return_void_twice(i32 %a) {
   br i1 %b, label %true, label %false
 
 true:
-  store i32 0, i32* null
+  store i32 0, ptr null
   ret void
 
 false:
-  store i32 1, i32* null
+  store i32 1, ptr null
   ret void
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/returned.ll b/llvm/test/CodeGen/WebAssembly/returned.ll
index a78b30a1491c..b00a670a015a 100644
--- a/llvm/test/CodeGen/WebAssembly/returned.ll
+++ b/llvm/test/CodeGen/WebAssembly/returned.ll
@@ -11,25 +11,24 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-NEXT: {{^}} call      $push2=, _ZN5AppleC1Ev, $pop1{{$}}
 ; CHECK-NEXT: return    $pop2{{$}}
 %class.Apple = type { i8 }
-declare noalias i8* @_Znwm(i32)
-declare %class.Apple* @_ZN5AppleC1Ev(%class.Apple* returned)
-define %class.Apple* @_Z3foov() {
+declare noalias ptr @_Znwm(i32)
+declare ptr @_ZN5AppleC1Ev(ptr returned)
+define ptr @_Z3foov() {
 entry:
-  %call = tail call noalias i8* @_Znwm(i32 1)
-  %0 = bitcast i8* %call to %class.Apple*
-  %call1 = tail call %class.Apple* @_ZN5AppleC1Ev(%class.Apple* %0)
-  ret %class.Apple* %0
+  %call = tail call noalias ptr @_Znwm(i32 1)
+  %call1 = tail call ptr @_ZN5AppleC1Ev(ptr %call)
+  ret ptr %call
 }
 
 ; CHECK-LABEL: _Z3barPvS_l:
 ; CHECK-NEXT: .functype _Z3barPvS_l (i32, i32, i32) -> (i32){{$}}
 ; CHECK-NEXT: {{^}} call     $push0=, memcpy, $0, $1, $2{{$}}
 ; CHECK-NEXT: return   $pop0{{$}}
-declare i8* @memcpy(i8* returned, i8*, i32)
-define i8* @_Z3barPvS_l(i8* %p, i8* %s, i32 %n) {
+declare ptr @memcpy(ptr returned, ptr, i32)
+define ptr @_Z3barPvS_l(ptr %p, ptr %s, i32 %n) {
 entry:
-  %call = tail call i8* @memcpy(i8* %p, i8* %s, i32 %n)
-  ret i8* %p
+  %call = tail call ptr @memcpy(ptr %p, ptr %s, i32 %n)
+  ret ptr %p
 }
 
 ; Test that the optimization isn't performed on constant arguments.
@@ -39,12 +38,12 @@ entry:
 ; CHECK-NEXT: {{^}} call        $drop=, returns_arg, $pop0{{$}}
 ; CHECK-NEXT: return{{$}}
 @global = external global i32
-@addr = global i32* @global
+@addr = global ptr @global
 define void @test_constant_arg() {
-  %call = call i32* @returns_arg(i32* @global)
+  %call = call ptr @returns_arg(ptr @global)
   ret void
 }
-declare i32* @returns_arg(i32* returned)
+declare ptr @returns_arg(ptr returned)
 
 ; Test that the optimization isn't performed on arguments without the
 ; "returned" attribute.

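Back in _Z3foov, the conversion also deletes an instruction outright: the bitcast that adapted the i8* result of _Znwm to %class.Apple* for the constructor call is meaningless once both sides are ptr, so %call feeds the constructor directly. A sketch of the pattern, under a hypothetical wrapper name @ctor_sketch, with the removed form kept as a comment:

%class.Apple = type { i8 }
declare noalias ptr @_Znwm(i32)
declare ptr @_ZN5AppleC1Ev(ptr returned)
define ptr @ctor_sketch() {
  ; Before: "%0 = bitcast i8* %call to %class.Apple*" stood between the
  ; allocation and the constructor; now the allocation is passed as-is.
  %call = tail call noalias ptr @_Znwm(i32 1)
  %call1 = tail call ptr @_ZN5AppleC1Ev(ptr %call)
  ret ptr %call
}
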
diff  --git a/llvm/test/CodeGen/WebAssembly/simd-build-pair.ll b/llvm/test/CodeGen/WebAssembly/simd-build-pair.ll
index 5dc022f2b12a..af1d80afaf3b 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-build-pair.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-build-pair.ll
@@ -6,20 +6,20 @@ target triple = "wasm32-unknown-unknown"
 ; This code produces a selection DAG like the following:
 
 ;    t0: ch = EntryToken
-;  t3: v4i32,ch = load<(load 16 from `<4 x i32>* undef`)> t0, undef:i32, undef:i32
+;  t3: v4i32,ch = load<(load 16 from `ptr undef`)> t0, undef:i32, undef:i32
 ;        t30: i32 = extract_vector_elt t3, Constant:i32<2>
 ;        t28: i32 = extract_vector_elt t3, Constant:i32<3>
 ;      t24: i64 = build_pair t30, t28
-;    t8: ch = store<(store 8 into `i64* undef`, align 1)> t3:1, t24, undef:i32, undef:i32
+;    t8: ch = store<(store 8 into `ptr undef`, align 1)> t3:1, t24, undef:i32, undef:i32
 ;  t9: ch = WebAssemblyISD::RETURN t8
 
 ; CHECK:      v128.store64_lane
 define void @build_pair_i32s() {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* undef, align 16
+  %0 = load <4 x i32>, ptr undef, align 16
   %shuffle.i184 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
   %bc357 = bitcast <4 x i32> %shuffle.i184 to <2 x i64>
   %1 = extractelement <2 x i64> %bc357, i32 0
-  store i64 %1, i64* undef, align 1
+  store i64 %1, ptr undef, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll b/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll
index 124ec36be643..52d73b17b220 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll
@@ -19,12 +19,12 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK:         .functype foo (i32) -> ()
 ; Implementation omitted...
 ; CHECK:         return
-define void @foo(<4 x i8>* %p) {
-  %1 = load <4 x i8>, <4 x i8>* %p
+define void @foo(ptr %p) {
+  %1 = load <4 x i8>, ptr %p
   %2 = sitofp <4 x i8> %1 to <4 x double>
   %3 = fmul <4 x double> zeroinitializer, %2
   %4 = fadd <4 x double> %3, zeroinitializer
   %5 = fptrunc <4 x double> %4 to <4 x float>
-  store <4 x float> %5, <4 x float>* undef
+  store <4 x float> %5, ptr undef
   ret void
 }

diff  --git a/llvm/test/CodeGen/WebAssembly/simd-illegal-signext.ll b/llvm/test/CodeGen/WebAssembly/simd-illegal-signext.ll
index c4380467cb2f..9e9bcbf21fe8 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-illegal-signext.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-illegal-signext.ll
@@ -13,11 +13,11 @@ target triple = "wasm32-unknown-emscripten"
 ; CHECK-NEXT: i32.store16
 define void @foo() {
 entry:
-  %0 = load i32*, i32** undef, align 4
-  %1 = load i32, i32* %0, align 4
-  %2 = load i32, i32* undef, align 4
+  %0 = load ptr, ptr undef, align 4
+  %1 = load i32, ptr %0, align 4
+  %2 = load i32, ptr undef, align 4
   %conv67 = trunc i32 %2 to i8
   %conv68 = sext i8 %conv67 to i16
-  store i16 %conv68, i16* null, align 2
+  store i16 %conv68, ptr null, align 2
   ret void
 }
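
simd-illegal-signext.ll shows the multi-level case: i32** and i32* both become plain ptr, so the depth of indirection is no longer visible in the types, only in the chain of loads. A minimal sketch of a two-level load, with a hypothetical function name:

define i32 @double_indirect_sketch(ptr %pp) {
  %p = load ptr, ptr %pp    ; was: %p = load i32*, i32** %pp
  %v = load i32, ptr %p     ; was: %v = load i32, i32* %p
  ret i32 %v
}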

diff  --git a/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll b/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
index d76da7657743..77191f4eb0c4 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
@@ -12,7 +12,7 @@ target triple = "wasm32-unknown-unknown"
 ; v128.load8_lane / v128.store8_lane
 ;===----------------------------------------------------------------------------
 
-define <16 x i8> @load_lane_i8_no_offset(i8* %p, <16 x i8> %v) {
+define <16 x i8> @load_lane_i8_no_offset(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: load_lane_i8_no_offset:
 ; CHECK:         .functype load_lane_i8_no_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -20,12 +20,12 @@ define <16 x i8> @load_lane_i8_no_offset(i8* %p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i8, i8* %p
+  %x = load i8, ptr %p
   %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }
 
-define <16 x i8> @load_lane_i8_with_folded_offset(i8* %p, <16 x i8> %v) {
+define <16 x i8> @load_lane_i8_with_folded_offset(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: load_lane_i8_with_folded_offset:
 ; CHECK:         .functype load_lane_i8_with_folded_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -35,15 +35,15 @@ define <16 x i8> @load_lane_i8_with_folded_offset(i8* %p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i8* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
-  %x = load i8, i8* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i8, ptr %s
   %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }
 
-define <16 x i8> @load_lane_i8_with_folded_gep_offset(i8* %p, <16 x i8> %v) {
+define <16 x i8> @load_lane_i8_with_folded_gep_offset(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: load_lane_i8_with_folded_gep_offset:
 ; CHECK:         .functype load_lane_i8_with_folded_gep_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -53,13 +53,13 @@ define <16 x i8> @load_lane_i8_with_folded_gep_offset(i8* %p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i8, i8* %p, i32 6
-  %x = load i8, i8* %s
+  %s = getelementptr inbounds i8, ptr %p, i32 6
+  %x = load i8, ptr %s
   %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }
 
-define <16 x i8> @load_lane_i8_with_unfolded_gep_negative_offset(i8* %p, <16 x i8> %v) {
+define <16 x i8> @load_lane_i8_with_unfolded_gep_negative_offset(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: load_lane_i8_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_lane_i8_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -69,13 +69,13 @@ define <16 x i8> @load_lane_i8_with_unfolded_gep_negative_offset(i8* %p, <16 x i
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i8, i8* %p, i32 -6
-  %x = load i8, i8* %s
+  %s = getelementptr inbounds i8, ptr %p, i32 -6
+  %x = load i8, ptr %s
   %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }
 
-define <16 x i8> @load_lane_i8_with_unfolded_offset(i8* %p, <16 x i8> %v) {
+define <16 x i8> @load_lane_i8_with_unfolded_offset(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: load_lane_i8_with_unfolded_offset:
 ; CHECK:         .functype load_lane_i8_with_unfolded_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -85,15 +85,15 @@ define <16 x i8> @load_lane_i8_with_unfolded_offset(i8* %p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i8* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
-  %x = load i8, i8* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i8, ptr %s
   %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }
 
-define <16 x i8> @load_lane_i8_with_unfolded_gep_offset(i8* %p, <16 x i8> %v) {
+define <16 x i8> @load_lane_i8_with_unfolded_gep_offset(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: load_lane_i8_with_unfolded_gep_offset:
 ; CHECK:         .functype load_lane_i8_with_unfolded_gep_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -103,8 +103,8 @@ define <16 x i8> @load_lane_i8_with_unfolded_gep_offset(i8* %p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i8, i8* %p, i32 6
-  %x = load i8, i8* %s
+  %s = getelementptr i8, ptr %p, i32 6
+  %x = load i8, ptr %s
   %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }
@@ -117,8 +117,8 @@ define <16 x i8> @load_lane_i8_from_numeric_address(<16 x i8> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 42 to i8*
-  %x = load i8, i8* %s
+  %s = inttoptr i32 42 to ptr
+  %x = load i8, ptr %s
   %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }
@@ -132,12 +132,12 @@ define <16 x i8> @load_lane_i8_from_global_address(<16 x i8> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i8, i8* @gv_i8
+  %x = load i8, ptr @gv_i8
   %t = insertelement <16 x i8> %v, i8 %x, i32 0
   ret <16 x i8> %t
 }
 
-define void @store_lane_i8_no_offset(<16 x i8> %v, i8* %p) {
+define void @store_lane_i8_no_offset(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i8_no_offset:
 ; CHECK:         .functype store_lane_i8_no_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -146,11 +146,11 @@ define void @store_lane_i8_no_offset(<16 x i8> %v, i8* %p) {
 ; CHECK-NEXT:    v128.store8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <16 x i8> %v, i32 0
-  store i8 %x, i8* %p
+  store i8 %x, ptr %p
   ret void
 }
 
-define void @store_lane_i8_with_folded_offset(<16 x i8> %v, i8* %p) {
+define void @store_lane_i8_with_folded_offset(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i8_with_folded_offset:
 ; CHECK:         .functype store_lane_i8_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -158,15 +158,15 @@ define void @store_lane_i8_with_folded_offset(<16 x i8> %v, i8* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store8_lane 24, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i8* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
+  %s = inttoptr i32 %r to ptr
   %x = extractelement <16 x i8> %v, i32 0
-  store i8 %x, i8* %s
+  store i8 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i8_with_folded_gep_offset(<16 x i8> %v, i8* %p) {
+define void @store_lane_i8_with_folded_gep_offset(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i8_with_folded_gep_offset:
 ; CHECK:         .functype store_lane_i8_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -174,13 +174,13 @@ define void @store_lane_i8_with_folded_gep_offset(<16 x i8> %v, i8* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store8_lane 6, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i8, i8* %p, i32 6
+  %s = getelementptr inbounds i8, ptr %p, i32 6
   %x = extractelement <16 x i8> %v, i32 0
-  store i8 %x, i8* %s
+  store i8 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i8_with_unfolded_gep_negative_offset(<16 x i8> %v, i8* %p) {
+define void @store_lane_i8_with_unfolded_gep_negative_offset(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i8_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_lane_i8_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -190,13 +190,13 @@ define void @store_lane_i8_with_unfolded_gep_negative_offset(<16 x i8> %v, i8* %
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i8, i8* %p, i32 -6
+  %s = getelementptr inbounds i8, ptr %p, i32 -6
   %x = extractelement <16 x i8> %v, i32 0
-  store i8 %x, i8* %s
+  store i8 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i8_with_unfolded_offset(<16 x i8> %v, i8* %p) {
+define void @store_lane_i8_with_unfolded_offset(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i8_with_unfolded_offset:
 ; CHECK:         .functype store_lane_i8_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -206,15 +206,15 @@ define void @store_lane_i8_with_unfolded_offset(<16 x i8> %v, i8* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i8* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i8*
+  %s = inttoptr i32 %r to ptr
   %x = extractelement <16 x i8> %v, i32 0
-  store i8 %x, i8* %s
+  store i8 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i8_with_unfolded_gep_offset(<16 x i8> %v, i8* %p) {
+define void @store_lane_i8_with_unfolded_gep_offset(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i8_with_unfolded_gep_offset:
 ; CHECK:         .functype store_lane_i8_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -224,9 +224,9 @@ define void @store_lane_i8_with_unfolded_gep_offset(<16 x i8> %v, i8* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i8, i8* %p, i32 6
+  %s = getelementptr i8, ptr %p, i32 6
   %x = extractelement <16 x i8> %v, i32 0
-  store i8 %x, i8* %s
+  store i8 %x, ptr %s
   ret void
 }
 
@@ -238,9 +238,9 @@ define void @store_lane_i8_to_numeric_address(<16 x i8> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store8_lane 42, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 42 to i8*
+  %s = inttoptr i32 42 to ptr
   %x = extractelement <16 x i8> %v, i32 0
-  store i8 %x, i8* %s
+  store i8 %x, ptr %s
   ret void
 }
 
@@ -253,7 +253,7 @@ define void @store_lane_i8_from_global_address(<16 x i8> %v) {
 ; CHECK-NEXT:    v128.store8_lane gv_i8, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <16 x i8> %v, i32 0
-  store i8 %x, i8* @gv_i8
+  store i8 %x, ptr @gv_i8
   ret void
 }
 
@@ -261,7 +261,7 @@ define void @store_lane_i8_from_global_address(<16 x i8> %v) {
 ; v128.load16_lane / v128.store16_lane
 ;===----------------------------------------------------------------------------
 
-define <8 x i16> @load_lane_i16_no_offset(i16* %p, <8 x i16> %v) {
+define <8 x i16> @load_lane_i16_no_offset(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: load_lane_i16_no_offset:
 ; CHECK:         .functype load_lane_i16_no_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -269,12 +269,12 @@ define <8 x i16> @load_lane_i16_no_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i16, i16* %p
+  %x = load i16, ptr %p
   %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }
 
-define <8 x i16> @load_lane_i16_with_folded_offset(i16* %p, <8 x i16> %v) {
+define <8 x i16> @load_lane_i16_with_folded_offset(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: load_lane_i16_with_folded_offset:
 ; CHECK:         .functype load_lane_i16_with_folded_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -284,15 +284,15 @@ define <8 x i16> @load_lane_i16_with_folded_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i16* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i16*
-  %x = load i16, i16* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i16, ptr %s
   %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }
 
-define <8 x i16> @load_lane_i16_with_folded_gep_offset(i16* %p, <8 x i16> %v) {
+define <8 x i16> @load_lane_i16_with_folded_gep_offset(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: load_lane_i16_with_folded_gep_offset:
 ; CHECK:         .functype load_lane_i16_with_folded_gep_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -302,13 +302,13 @@ define <8 x i16> @load_lane_i16_with_folded_gep_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i16, i16* %p, i32 6
-  %x = load i16, i16* %s
+  %s = getelementptr inbounds i16, ptr %p, i32 6
+  %x = load i16, ptr %s
   %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }
 
-define <8 x i16> @load_lane_i16_with_unfolded_gep_negative_offset(i16* %p, <8 x i16> %v) {
+define <8 x i16> @load_lane_i16_with_unfolded_gep_negative_offset(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: load_lane_i16_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_lane_i16_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -318,13 +318,13 @@ define <8 x i16> @load_lane_i16_with_unfolded_gep_negative_offset(i16* %p, <8 x
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i16, i16* %p, i32 -6
-  %x = load i16, i16* %s
+  %s = getelementptr inbounds i16, ptr %p, i32 -6
+  %x = load i16, ptr %s
   %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }
 
-define <8 x i16> @load_lane_i16_with_unfolded_offset(i16* %p, <8 x i16> %v) {
+define <8 x i16> @load_lane_i16_with_unfolded_offset(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: load_lane_i16_with_unfolded_offset:
 ; CHECK:         .functype load_lane_i16_with_unfolded_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -334,15 +334,15 @@ define <8 x i16> @load_lane_i16_with_unfolded_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i16* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i16*
-  %x = load i16, i16* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i16, ptr %s
   %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }
 
-define <8 x i16> @load_lane_i16_with_unfolded_gep_offset(i16* %p, <8 x i16> %v) {
+define <8 x i16> @load_lane_i16_with_unfolded_gep_offset(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: load_lane_i16_with_unfolded_gep_offset:
 ; CHECK:         .functype load_lane_i16_with_unfolded_gep_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -352,8 +352,8 @@ define <8 x i16> @load_lane_i16_with_unfolded_gep_offset(i16* %p, <8 x i16> %v)
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i16, i16* %p, i32 6
-  %x = load i16, i16* %s
+  %s = getelementptr i16, ptr %p, i32 6
+  %x = load i16, ptr %s
   %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }
@@ -366,8 +366,8 @@ define <8 x i16> @load_lane_i16_from_numeric_address(<8 x i16> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 42 to i16*
-  %x = load i16, i16* %s
+  %s = inttoptr i32 42 to ptr
+  %x = load i16, ptr %s
   %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }
@@ -381,12 +381,12 @@ define <8 x i16> @load_lane_i16_from_global_address(<8 x i16> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i16, i16* @gv_i16
+  %x = load i16, ptr @gv_i16
   %t = insertelement <8 x i16> %v, i16 %x, i32 0
   ret <8 x i16> %t
 }
 
-define void @store_lane_i16_no_offset(<8 x i16> %v, i16* %p) {
+define void @store_lane_i16_no_offset(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i16_no_offset:
 ; CHECK:         .functype store_lane_i16_no_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -395,11 +395,11 @@ define void @store_lane_i16_no_offset(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    v128.store16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <8 x i16> %v, i32 0
-  store i16 %x, i16* %p
+  store i16 %x, ptr %p
   ret void
 }
 
-define void @store_lane_i16_with_folded_offset(<8 x i16> %v, i16* %p) {
+define void @store_lane_i16_with_folded_offset(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i16_with_folded_offset:
 ; CHECK:         .functype store_lane_i16_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -407,15 +407,15 @@ define void @store_lane_i16_with_folded_offset(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store16_lane 24, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i16* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i16*
+  %s = inttoptr i32 %r to ptr
   %x = extractelement <8 x i16> %v, i32 0
-  store i16 %x, i16* %s
+  store i16 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i16_with_folded_gep_offset(<8 x i16> %v, i16* %p) {
+define void @store_lane_i16_with_folded_gep_offset(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i16_with_folded_gep_offset:
 ; CHECK:         .functype store_lane_i16_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -423,13 +423,13 @@ define void @store_lane_i16_with_folded_gep_offset(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store16_lane 12, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i16, i16* %p, i32 6
+  %s = getelementptr inbounds i16, ptr %p, i32 6
   %x = extractelement <8 x i16> %v, i32 0
-  store i16 %x, i16* %s
+  store i16 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i16_with_unfolded_gep_negative_offset(<8 x i16> %v, i16* %p) {
+define void @store_lane_i16_with_unfolded_gep_negative_offset(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i16_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_lane_i16_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -439,13 +439,13 @@ define void @store_lane_i16_with_unfolded_gep_negative_offset(<8 x i16> %v, i16*
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i16, i16* %p, i32 -6
+  %s = getelementptr inbounds i16, ptr %p, i32 -6
   %x = extractelement <8 x i16> %v, i32 0
-  store i16 %x, i16* %s
+  store i16 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i16_with_unfolded_offset(<8 x i16> %v, i16* %p) {
+define void @store_lane_i16_with_unfolded_offset(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i16_with_unfolded_offset:
 ; CHECK:         .functype store_lane_i16_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -455,15 +455,15 @@ define void @store_lane_i16_with_unfolded_offset(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i16* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i16*
+  %s = inttoptr i32 %r to ptr
   %x = extractelement <8 x i16> %v, i32 0
-  store i16 %x, i16* %s
+  store i16 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i16_with_unfolded_gep_offset(<8 x i16> %v, i16* %p) {
+define void @store_lane_i16_with_unfolded_gep_offset(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i16_with_unfolded_gep_offset:
 ; CHECK:         .functype store_lane_i16_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -473,9 +473,9 @@ define void @store_lane_i16_with_unfolded_gep_offset(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i16, i16* %p, i32 6
+  %s = getelementptr i16, ptr %p, i32 6
   %x = extractelement <8 x i16> %v, i32 0
-  store i16 %x, i16* %s
+  store i16 %x, ptr %s
   ret void
 }
 
@@ -487,9 +487,9 @@ define void @store_lane_i16_to_numeric_address(<8 x i16> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store16_lane 42, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 42 to i16*
+  %s = inttoptr i32 42 to ptr
   %x = extractelement <8 x i16> %v, i32 0
-  store i16 %x, i16* %s
+  store i16 %x, ptr %s
   ret void
 }
 
@@ -502,7 +502,7 @@ define void @store_lane_i16_from_global_address(<8 x i16> %v) {
 ; CHECK-NEXT:    v128.store16_lane gv_i16, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <8 x i16> %v, i32 0
-  store i16 %x, i16* @gv_i16
+  store i16 %x, ptr @gv_i16
   ret void
 }
 
@@ -510,7 +510,7 @@ define void @store_lane_i16_from_global_address(<8 x i16> %v) {
 ; v128.load32_lane / v128.store32_lane
 ;===----------------------------------------------------------------------------
 
-define <4 x i32> @load_lane_i32_no_offset(i32* %p, <4 x i32> %v) {
+define <4 x i32> @load_lane_i32_no_offset(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: load_lane_i32_no_offset:
 ; CHECK:         .functype load_lane_i32_no_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -518,12 +518,12 @@ define <4 x i32> @load_lane_i32_no_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i32, i32* %p
+  %x = load i32, ptr %p
   %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }
 
-define <4 x i32> @load_lane_i32_with_folded_offset(i32* %p, <4 x i32> %v) {
+define <4 x i32> @load_lane_i32_with_folded_offset(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: load_lane_i32_with_folded_offset:
 ; CHECK:         .functype load_lane_i32_with_folded_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -533,15 +533,15 @@ define <4 x i32> @load_lane_i32_with_folded_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i32* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %x = load i32, i32* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }
 
-define <4 x i32> @load_lane_i32_with_folded_gep_offset(i32* %p, <4 x i32> %v) {
+define <4 x i32> @load_lane_i32_with_folded_gep_offset(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: load_lane_i32_with_folded_gep_offset:
 ; CHECK:         .functype load_lane_i32_with_folded_gep_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -551,13 +551,13 @@ define <4 x i32> @load_lane_i32_with_folded_gep_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  %x = load i32, i32* %s
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }
 
-define <4 x i32> @load_lane_i32_with_unfolded_gep_negative_offset(i32* %p, <4 x i32> %v) {
+define <4 x i32> @load_lane_i32_with_unfolded_gep_negative_offset(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: load_lane_i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_lane_i32_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -567,13 +567,13 @@ define <4 x i32> @load_lane_i32_with_unfolded_gep_negative_offset(i32* %p, <4 x
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i32, i32* %p, i32 -6
-  %x = load i32, i32* %s
+  %s = getelementptr inbounds i32, ptr %p, i32 -6
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }
 
-define <4 x i32> @load_lane_i32_with_unfolded_offset(i32* %p, <4 x i32> %v) {
+define <4 x i32> @load_lane_i32_with_unfolded_offset(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: load_lane_i32_with_unfolded_offset:
 ; CHECK:         .functype load_lane_i32_with_unfolded_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -583,15 +583,15 @@ define <4 x i32> @load_lane_i32_with_unfolded_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i32* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %x = load i32, i32* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }
 
-define <4 x i32> @load_lane_i32_with_unfolded_gep_offset(i32* %p, <4 x i32> %v) {
+define <4 x i32> @load_lane_i32_with_unfolded_gep_offset(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: load_lane_i32_with_unfolded_gep_offset:
 ; CHECK:         .functype load_lane_i32_with_unfolded_gep_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -601,8 +601,8 @@ define <4 x i32> @load_lane_i32_with_unfolded_gep_offset(i32* %p, <4 x i32> %v)
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i32, i32* %p, i32 6
-  %x = load i32, i32* %s
+  %s = getelementptr i32, ptr %p, i32 6
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }
@@ -615,8 +615,8 @@ define <4 x i32> @load_lane_i32_from_numeric_address(<4 x i32> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 42 to i32*
-  %x = load i32, i32* %s
+  %s = inttoptr i32 42 to ptr
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }
@@ -630,12 +630,12 @@ define <4 x i32> @load_lane_i32_from_global_address(<4 x i32> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i32, i32* @gv_i32
+  %x = load i32, ptr @gv_i32
   %t = insertelement <4 x i32> %v, i32 %x, i32 0
   ret <4 x i32> %t
 }
 
-define void @store_lane_i32_no_offset(<4 x i32> %v, i32* %p) {
+define void @store_lane_i32_no_offset(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i32_no_offset:
 ; CHECK:         .functype store_lane_i32_no_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -644,11 +644,11 @@ define void @store_lane_i32_no_offset(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    v128.store32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* %p
+  store i32 %x, ptr %p
   ret void
 }
 
-define void @store_lane_i32_with_folded_offset(<4 x i32> %v, i32* %p) {
+define void @store_lane_i32_with_folded_offset(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i32_with_folded_offset:
 ; CHECK:         .functype store_lane_i32_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -656,15 +656,15 @@ define void @store_lane_i32_with_folded_offset(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store32_lane 24, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i32* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
+  %s = inttoptr i32 %r to ptr
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* %s
+  store i32 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i32_with_folded_gep_offset(<4 x i32> %v, i32* %p) {
+define void @store_lane_i32_with_folded_gep_offset(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i32_with_folded_gep_offset:
 ; CHECK:         .functype store_lane_i32_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -672,13 +672,13 @@ define void @store_lane_i32_with_folded_gep_offset(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store32_lane 24, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i32, i32* %p, i32 6
+  %s = getelementptr inbounds i32, ptr %p, i32 6
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* %s
+  store i32 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i32_with_unfolded_gep_negative_offset(<4 x i32> %v, i32* %p) {
+define void @store_lane_i32_with_unfolded_gep_negative_offset(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_lane_i32_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -688,13 +688,13 @@ define void @store_lane_i32_with_unfolded_gep_negative_offset(<4 x i32> %v, i32*
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i32, i32* %p, i32 -6
+  %s = getelementptr inbounds i32, ptr %p, i32 -6
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* %s
+  store i32 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i32_with_unfolded_offset(<4 x i32> %v, i32* %p) {
+define void @store_lane_i32_with_unfolded_offset(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i32_with_unfolded_offset:
 ; CHECK:         .functype store_lane_i32_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -704,15 +704,15 @@ define void @store_lane_i32_with_unfolded_offset(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i32* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
+  %s = inttoptr i32 %r to ptr
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* %s
+  store i32 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i32_with_unfolded_gep_offset(<4 x i32> %v, i32* %p) {
+define void @store_lane_i32_with_unfolded_gep_offset(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i32_with_unfolded_gep_offset:
 ; CHECK:         .functype store_lane_i32_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -722,9 +722,9 @@ define void @store_lane_i32_with_unfolded_gep_offset(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i32, i32* %p, i32 6
+  %s = getelementptr i32, ptr %p, i32 6
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* %s
+  store i32 %x, ptr %s
   ret void
 }
 
@@ -736,9 +736,9 @@ define void @store_lane_i32_to_numeric_address(<4 x i32> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store32_lane 42, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 42 to i32*
+  %s = inttoptr i32 42 to ptr
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* %s
+  store i32 %x, ptr %s
   ret void
 }
 
@@ -751,7 +751,7 @@ define void @store_lane_i32_from_global_address(<4 x i32> %v) {
 ; CHECK-NEXT:    v128.store32_lane gv_i32, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* @gv_i32
+  store i32 %x, ptr @gv_i32
   ret void
 }
 
@@ -759,7 +759,7 @@ define void @store_lane_i32_from_global_address(<4 x i32> %v) {
 ; v128.load64_lane / v128.store64_lane
 ;===----------------------------------------------------------------------------
 
-define <2 x i64> @load_lane_i64_no_offset(i64* %p, <2 x i64> %v) {
+define <2 x i64> @load_lane_i64_no_offset(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: load_lane_i64_no_offset:
 ; CHECK:         .functype load_lane_i64_no_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -767,12 +767,12 @@ define <2 x i64> @load_lane_i64_no_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i64, i64* %p
+  %x = load i64, ptr %p
   %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }
 
-define <2 x i64> @load_lane_i64_with_folded_offset(i64* %p, <2 x i64> %v) {
+define <2 x i64> @load_lane_i64_with_folded_offset(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: load_lane_i64_with_folded_offset:
 ; CHECK:         .functype load_lane_i64_with_folded_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -782,15 +782,15 @@ define <2 x i64> @load_lane_i64_with_folded_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i64* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %x = load i64, i64* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }
 
-define <2 x i64> @load_lane_i64_with_folded_gep_offset(i64* %p, <2 x i64> %v) {
+define <2 x i64> @load_lane_i64_with_folded_gep_offset(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: load_lane_i64_with_folded_gep_offset:
 ; CHECK:         .functype load_lane_i64_with_folded_gep_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -800,13 +800,13 @@ define <2 x i64> @load_lane_i64_with_folded_gep_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i64, i64* %p, i32 6
-  %x = load i64, i64* %s
+  %s = getelementptr inbounds i64, ptr %p, i32 6
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }
 
-define <2 x i64> @load_lane_i64_with_unfolded_gep_negative_offset(i64* %p, <2 x i64> %v) {
+define <2 x i64> @load_lane_i64_with_unfolded_gep_negative_offset(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: load_lane_i64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_lane_i64_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -816,13 +816,13 @@ define <2 x i64> @load_lane_i64_with_unfolded_gep_negative_offset(i64* %p, <2 x
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i64, i64* %p, i32 -6
-  %x = load i64, i64* %s
+  %s = getelementptr inbounds i64, ptr %p, i32 -6
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }
 
-define <2 x i64> @load_lane_i64_with_unfolded_offset(i64* %p, <2 x i64> %v) {
+define <2 x i64> @load_lane_i64_with_unfolded_offset(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: load_lane_i64_with_unfolded_offset:
 ; CHECK:         .functype load_lane_i64_with_unfolded_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -832,15 +832,15 @@ define <2 x i64> @load_lane_i64_with_unfolded_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i64* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %x = load i64, i64* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }
 
-define <2 x i64> @load_lane_i64_with_unfolded_gep_offset(i64* %p, <2 x i64> %v) {
+define <2 x i64> @load_lane_i64_with_unfolded_gep_offset(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: load_lane_i64_with_unfolded_gep_offset:
 ; CHECK:         .functype load_lane_i64_with_unfolded_gep_offset (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -850,8 +850,8 @@ define <2 x i64> @load_lane_i64_with_unfolded_gep_offset(i64* %p, <2 x i64> %v)
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i64, i64* %p, i32 6
-  %x = load i64, i64* %s
+  %s = getelementptr i64, ptr %p, i32 6
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }
@@ -864,8 +864,8 @@ define <2 x i64> @load_lane_i64_from_numeric_address(<2 x i64> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 42 to i64*
-  %x = load i64, i64* %s
+  %s = inttoptr i32 42 to ptr
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }
@@ -879,12 +879,12 @@ define <2 x i64> @load_lane_i64_from_global_address(<2 x i64> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i64, i64* @gv_i64
+  %x = load i64, ptr @gv_i64
   %t = insertelement <2 x i64> %v, i64 %x, i32 0
   ret <2 x i64> %t
 }
 
-define void @store_lane_i64_no_offset(<2 x i64> %v, i64* %p) {
+define void @store_lane_i64_no_offset(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i64_no_offset:
 ; CHECK:         .functype store_lane_i64_no_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -893,11 +893,11 @@ define void @store_lane_i64_no_offset(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %p
+  store i64 %x, ptr %p
   ret void
 }
 
-define void @store_lane_i64_with_folded_offset(<2 x i64> %v, i64* %p) {
+define void @store_lane_i64_with_folded_offset(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i64_with_folded_offset:
 ; CHECK:         .functype store_lane_i64_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -905,15 +905,15 @@ define void @store_lane_i64_with_folded_offset(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 24, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i64* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
+  %s = inttoptr i32 %r to ptr
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %s
+  store i64 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i64_with_folded_gep_offset(<2 x i64> %v, i64* %p) {
+define void @store_lane_i64_with_folded_gep_offset(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i64_with_folded_gep_offset:
 ; CHECK:         .functype store_lane_i64_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -921,13 +921,13 @@ define void @store_lane_i64_with_folded_gep_offset(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 48, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i64, i64* %p, i32 6
+  %s = getelementptr inbounds i64, ptr %p, i32 6
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %s
+  store i64 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i64_with_unfolded_gep_negative_offset(<2 x i64> %v, i64* %p) {
+define void @store_lane_i64_with_unfolded_gep_negative_offset(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_lane_i64_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -937,13 +937,13 @@ define void @store_lane_i64_with_unfolded_gep_negative_offset(<2 x i64> %v, i64*
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i64, i64* %p, i32 -6
+  %s = getelementptr inbounds i64, ptr %p, i32 -6
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %s
+  store i64 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i64_with_unfolded_offset(<2 x i64> %v, i64* %p) {
+define void @store_lane_i64_with_unfolded_offset(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i64_with_unfolded_offset:
 ; CHECK:         .functype store_lane_i64_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -953,15 +953,15 @@ define void @store_lane_i64_with_unfolded_offset(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i64* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
+  %s = inttoptr i32 %r to ptr
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %s
+  store i64 %x, ptr %s
   ret void
 }
 
-define void @store_lane_i64_with_unfolded_gep_offset(<2 x i64> %v, i64* %p) {
+define void @store_lane_i64_with_unfolded_gep_offset(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i64_with_unfolded_gep_offset:
 ; CHECK:         .functype store_lane_i64_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -971,9 +971,9 @@ define void @store_lane_i64_with_unfolded_gep_offset(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i64, i64* %p, i32 6
+  %s = getelementptr i64, ptr %p, i32 6
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %s
+  store i64 %x, ptr %s
   ret void
 }
 
@@ -985,9 +985,9 @@ define void @store_lane_i64_to_numeric_address(<2 x i64> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 42, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 42 to i64*
+  %s = inttoptr i32 42 to ptr
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %s
+  store i64 %x, ptr %s
   ret void
 }
 
@@ -1000,6 +1000,6 @@ define void @store_lane_i64_from_global_address(<2 x i64> %v) {
 ; CHECK-NEXT:    v128.store64_lane gv_i64, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* @gv_i64
+  store i64 %x, ptr @gv_i64
   ret void
 }
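
For context on the hunks above: the rewrite in this file is purely syntactic. Under opaque pointers a getelementptr keeps its explicit source element type, so every byte offset the lane tests check is unchanged; only the i16*/i32*/i64* spellings collapse to ptr. The folded/unfolded split is likewise untouched by the conversion: wasm memory-operand offsets are unsigned, so only an add known not to wrap unsigned may fold into the instruction's immediate. A minimal untested sketch of the two cases, with hypothetical function names (assumes the same wasm32 target triple as the tests):

  define i32 @fold_ok(ptr %p) {
    ; add nuw proves the address cannot wrap unsigned, so isel may
    ; fold the constant 24 into the load's immediate offset.
    %q = ptrtoint ptr %p to i32
    %r = add nuw i32 %q, 24
    %s = inttoptr i32 %r to ptr
    %x = load i32, ptr %s
    ret i32 %x
  }

  define i32 @no_fold(ptr %p) {
    ; add nsw only rules out signed wrap; the unsigned address may
    ; still wrap, so the add stays explicit in the selected code.
    %q = ptrtoint ptr %p to i32
    %r = add nsw i32 %q, 24
    %s = inttoptr i32 %r to ptr
    %x = load i32, ptr %s
    ret i32 %x
  }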

diff --git a/llvm/test/CodeGen/WebAssembly/simd-load-promote-wide.ll b/llvm/test/CodeGen/WebAssembly/simd-load-promote-wide.ll
index b88c1b45e229..0000470f3e2a 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-load-promote-wide.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-load-promote-wide.ll
@@ -6,7 +6,7 @@
 
 target triple = "wasm32-unknown-unknown"
 
-define <4 x double> @load_promote_v2f64(<4 x float>* %p) {
+define <4 x double> @load_promote_v2f64(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64:
 ; CHECK:         .functype load_promote_v2f64 (i32, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -23,12 +23,12 @@ define <4 x double> @load_promote_v2f64(<4 x float>* %p) {
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load <4 x float>, <4 x float>* %p
+  %e = load <4 x float>, ptr %p
   %v = fpext <4 x float> %e to <4 x double>
   ret <4 x double> %v
 }
 
-define <4 x double> @load_promote_v2f64_with_folded_offset(<4 x float>* %p) {
+define <4 x double> @load_promote_v2f64_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64_with_folded_offset:
 ; CHECK:         .functype load_promote_v2f64_with_folded_offset (i32, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -47,15 +47,15 @@ define <4 x double> @load_promote_v2f64_with_folded_offset(<4 x float>* %p) {
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x float>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x float>*
-  %e = load <4 x float>, <4 x float>* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load <4 x float>, ptr %s
   %v = fpext <4 x float> %e to <4 x double>
   ret <4 x double> %v
 }
 
-define <4 x double> @load_promote_v2f64_with_folded_gep_offset(<4 x float>* %p) {
+define <4 x double> @load_promote_v2f64_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64_with_folded_gep_offset:
 ; CHECK:         .functype load_promote_v2f64_with_folded_gep_offset (i32, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -74,13 +74,13 @@ define <4 x double> @load_promote_v2f64_with_folded_gep_offset(<4 x float>* %p)
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x float>, <4 x float>* %p, i32 1
-  %e = load <4 x float>, <4 x float>* %s
+  %s = getelementptr inbounds <4 x float>, ptr %p, i32 1
+  %e = load <4 x float>, ptr %s
   %v = fpext <4 x float> %e to <4 x double>
   ret <4 x double> %v
 }
 
-define <4 x double> @load_promote_v2f64_with_unfolded_gep_negative_offset(<4 x float>* %p) {
+define <4 x double> @load_promote_v2f64_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_promote_v2f64_with_unfolded_gep_negative_offset (i32, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -100,13 +100,13 @@ define <4 x double> @load_promote_v2f64_with_unfolded_gep_negative_offset(<4 x f
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x float>, <4 x float>* %p, i32 -1
-  %e = load <4 x float>, <4 x float>* %s
+  %s = getelementptr inbounds <4 x float>, ptr %p, i32 -1
+  %e = load <4 x float>, ptr %s
   %v = fpext <4 x float> %e to <4 x double>
   ret <4 x double> %v
 }
 
-define <4 x double> @load_promote_v2f64_with_unfolded_offset(<4 x float>* %p) {
+define <4 x double> @load_promote_v2f64_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64_with_unfolded_offset:
 ; CHECK:         .functype load_promote_v2f64_with_unfolded_offset (i32, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -125,15 +125,15 @@ define <4 x double> @load_promote_v2f64_with_unfolded_offset(<4 x float>* %p) {
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x float>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x float>*
-  %e = load <4 x float>, <4 x float>* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load <4 x float>, ptr %s
   %v = fpext <4 x float> %e to <4 x double>
   ret <4 x double> %v
 }
 
-define <4 x double> @load_promote_v2f64_with_unfolded_gep_offset(<4 x float>* %p) {
+define <4 x double> @load_promote_v2f64_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64_with_unfolded_gep_offset:
 ; CHECK:         .functype load_promote_v2f64_with_unfolded_gep_offset (i32, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -152,8 +152,8 @@ define <4 x double> @load_promote_v2f64_with_unfolded_gep_offset(<4 x float>* %p
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <4 x float>, <4 x float>* %p, i32 1
-  %e = load <4 x float>, <4 x float>* %s
+  %s = getelementptr <4 x float>, ptr %p, i32 1
+  %e = load <4 x float>, ptr %s
   %v = fpext <4 x float> %e to <4 x double>
   ret <4 x double> %v
 }
@@ -173,8 +173,8 @@ define <4 x double> @load_promote_v2f64_from_numeric_address() {
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <4 x float>*
-  %e = load <4 x float>, <4 x float>* %s
+  %s = inttoptr i32 32 to ptr
+  %e = load <4 x float>, ptr %s
   %v = fpext <4 x float> %e to <4 x double>
   ret <4 x double> %v
 }
@@ -197,7 +197,7 @@ define <4 x double> @load_promote_v2f64_from_global_address() {
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load <4 x float>, <4 x float>* @gv_v4f32
+  %e = load <4 x float>, ptr @gv_v4f32
   %v = fpext <4 x float> %e to <4 x double>
   ret <4 x double> %v
 }
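
A note on the shape of the promote tests: a <4 x double> result does not fit in one v128, so these functions return it indirectly through a pointer argument, which is why the .functype lines read (i32, i32) -> () and the bodies end in v128.store rather than a returned value. The underlying pattern is easier to see in a single-register form; a minimal untested sketch (function name hypothetical):

  define <2 x double> @promote_low_sketch(ptr %p) {
    ; widen the low two floats: exactly the shape that selects to a
    ; single f64x2.promote_low_f32x4.
    %e = load <4 x float>, ptr %p
    %lo = shufflevector <4 x float> %e, <4 x float> undef, <2 x i32> <i32 0, i32 1>
    %v = fpext <2 x float> %lo to <2 x double>
    ret <2 x double> %v
  }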

diff --git a/llvm/test/CodeGen/WebAssembly/simd-load-splat.ll b/llvm/test/CodeGen/WebAssembly/simd-load-splat.ll
index 4ad75f927e54..1a2aaa1f97ec 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-load-splat.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-load-splat.ll
@@ -11,10 +11,10 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-NEXT: v128.load8_splat $push[[V:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: v128.store 0($1), $pop[[V]]{{$}}
 ; CHECK-NEXT: return $[[E]]{{$}}
-define i8 @load_splat(i8* %p, <16 x i8>* %out) {
-  %e = load i8, i8* %p
+define i8 @load_splat(ptr %p, ptr %out) {
+  %e = load i8, ptr %p
   %v1 = insertelement <16 x i8> undef, i8 %e, i32 0
   %v2 = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> zeroinitializer
-  store <16 x i8> %v2, <16 x i8>* %out
+  store <16 x i8> %v2, ptr %out
   ret i8 %e
 }
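
The pattern this file keys on is a scalar load broadcast through insertelement into undef plus a zero-mask shufflevector; anything matching it selects to a single v128.loadN_splat. A minimal untested sketch of the i32 flavor (function name hypothetical):

  define <4 x i32> @splat32_sketch(ptr %p) {
    ; one scalar load broadcast to all four lanes:
    ; selected as v128.load32_splat.
    %e = load i32, ptr %p
    %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
    %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
    ret <4 x i32> %v2
  }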

diff --git a/llvm/test/CodeGen/WebAssembly/simd-load-store-alignment.ll b/llvm/test/CodeGen/WebAssembly/simd-load-store-alignment.ll
index b63900efb35e..bfab5fe36b3f 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-load-store-alignment.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-load-store-alignment.ll
@@ -9,53 +9,53 @@ target triple = "wasm32-unknown-unknown"
 ; 16 x i8
 ; ==============================================================================
 
-define <16 x i8> @load_v16i8_a1(<16 x i8> *%p) {
+define <16 x i8> @load_v16i8_a1(ptr %p) {
 ; CHECK-LABEL: load_v16i8_a1:
 ; CHECK:         .functype load_v16i8_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <16 x i8>, <16 x i8>* %p, align 1
+  %v = load <16 x i8>, ptr %p, align 1
   ret <16 x i8> %v
 }
 
-define <16 x i8> @load_v16i8_a4(<16 x i8> *%p) {
+define <16 x i8> @load_v16i8_a4(ptr %p) {
 ; CHECK-LABEL: load_v16i8_a4:
 ; CHECK:         .functype load_v16i8_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <16 x i8>, <16 x i8>* %p, align 4
+  %v = load <16 x i8>, ptr %p, align 4
   ret <16 x i8> %v
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define <16 x i8> @load_v16i8_a16(<16 x i8> *%p) {
+define <16 x i8> @load_v16i8_a16(ptr %p) {
 ; CHECK-LABEL: load_v16i8_a16:
 ; CHECK:         .functype load_v16i8_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <16 x i8>, <16 x i8>* %p, align 16
+  %v = load <16 x i8>, ptr %p, align 16
   ret <16 x i8> %v
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define <16 x i8> @load_v16i8_a32(<16 x i8> *%p) {
+define <16 x i8> @load_v16i8_a32(ptr %p) {
 ; CHECK-LABEL: load_v16i8_a32:
 ; CHECK:         .functype load_v16i8_a32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <16 x i8>, <16 x i8>* %p, align 32
+  %v = load <16 x i8>, ptr %p, align 32
   ret <16 x i8> %v
 }
 
-define void @store_v16i8_a1(<16 x i8> *%p, <16 x i8> %v) {
+define void @store_v16i8_a1(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: store_v16i8_a1:
 ; CHECK:         .functype store_v16i8_a1 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -63,11 +63,11 @@ define void @store_v16i8_a1(<16 x i8> *%p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  store <16 x i8> %v, <16 x i8>* %p, align 1
+  store <16 x i8> %v, ptr %p, align 1
   ret void
 }
 
-define void @store_v16i8_a4(<16 x i8> *%p, <16 x i8> %v) {
+define void @store_v16i8_a4(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: store_v16i8_a4:
 ; CHECK:         .functype store_v16i8_a4 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -75,12 +75,12 @@ define void @store_v16i8_a4(<16 x i8> *%p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  store <16 x i8> %v, <16 x i8>* %p, align 4
+  store <16 x i8> %v, ptr %p, align 4
   ret void
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define void @store_v16i8_a16(<16 x i8> *%p, <16 x i8> %v) {
+define void @store_v16i8_a16(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: store_v16i8_a16:
 ; CHECK:         .functype store_v16i8_a16 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -88,12 +88,12 @@ define void @store_v16i8_a16(<16 x i8> *%p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <16 x i8> %v, <16 x i8>* %p, align 16
+  store <16 x i8> %v, ptr %p, align 16
   ret void
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define void @store_v16i8_a32(<16 x i8> *%p, <16 x i8> %v) {
+define void @store_v16i8_a32(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: store_v16i8_a32:
 ; CHECK:         .functype store_v16i8_a32 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -101,40 +101,40 @@ define void @store_v16i8_a32(<16 x i8> *%p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <16 x i8> %v, <16 x i8>* %p, align 32
+  store <16 x i8> %v, ptr %p, align 32
   ret void
 }
 
 ; 1 is the default alignment for v128.load8_splat so no attribute is needed.
-define <16 x i8> @load_splat_v16i8_a1(i8* %p) {
+define <16 x i8> @load_splat_v16i8_a1(ptr %p) {
 ; CHECK-LABEL: load_splat_v16i8_a1:
 ; CHECK:         .functype load_splat_v16i8_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load8_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i8, i8* %p, align 1
+  %e = load i8, ptr %p, align 1
   %v1 = insertelement <16 x i8> undef, i8 %e, i32 0
   %v2 = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %v2
 }
 
 ; 2 is greater than the default alignment so it is ignored.
-define <16 x i8> @load_splat_v16i8_a2(i8* %p) {
+define <16 x i8> @load_splat_v16i8_a2(ptr %p) {
 ; CHECK-LABEL: load_splat_v16i8_a2:
 ; CHECK:         .functype load_splat_v16i8_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load8_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i8, i8* %p, align 2
+  %e = load i8, ptr %p, align 2
   %v1 = insertelement <16 x i8> undef, i8 %e, i32 0
   %v2 = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %v2
 }
 
 ; 1 is the default alignment for v128.load8_lane so no attribute is needed.
-define <16 x i8> @load_lane_i8_a1(i8* %p, <16 x i8> %v) {
+define <16 x i8> @load_lane_i8_a1(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: load_lane_i8_a1:
 ; CHECK:         .functype load_lane_i8_a1 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -142,13 +142,13 @@ define <16 x i8> @load_lane_i8_a1(i8* %p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i8, i8* %p, align 1
+  %e = load i8, ptr %p, align 1
   %v1 = insertelement <16 x i8> %v, i8 %e, i32 0
   ret <16 x i8> %v1
 }
 
 ; 2 is greater than the default alignment so it is ignored.
-define <16 x i8> @load_lane_i8_a2(i8* %p, <16 x i8> %v) {
+define <16 x i8> @load_lane_i8_a2(ptr %p, <16 x i8> %v) {
 ; CHECK-LABEL: load_lane_i8_a2:
 ; CHECK:         .functype load_lane_i8_a2 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -156,13 +156,13 @@ define <16 x i8> @load_lane_i8_a2(i8* %p, <16 x i8> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i8, i8* %p, align 2
+  %e = load i8, ptr %p, align 2
   %v1 = insertelement <16 x i8> %v, i8 %e, i32 0
   ret <16 x i8> %v1
 }
 
 ; 1 is the default alignment for v128.store8_lane so no attribute is needed.
-define void @store_lane_i8_a1(<16 x i8> %v, i8* %p) {
+define void @store_lane_i8_a1(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i8_a1:
 ; CHECK:         .functype store_lane_i8_a1 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -171,12 +171,12 @@ define void @store_lane_i8_a1(<16 x i8> %v, i8* %p) {
 ; CHECK-NEXT:    v128.store8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <16 x i8> %v, i32 0
-  store i8 %x, i8* %p, align 1
+  store i8 %x, ptr %p, align 1
   ret void
 }
 
 ; 2 is greater than the default alignment so it is ignored.
-define void @store_lane_i8_a2(<16 x i8> %v, i8* %p) {
+define void @store_lane_i8_a2(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i8_a2:
 ; CHECK:         .functype store_lane_i8_a2 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -185,7 +185,7 @@ define void @store_lane_i8_a2(<16 x i8> %v, i8* %p) {
 ; CHECK-NEXT:    v128.store8_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <16 x i8> %v, i32 0
-  store i8 %x, i8* %p, align 2
+  store i8 %x, ptr %p, align 2
   ret void
 }
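
A note on the p2align syntax exercised throughout this file: it encodes the alignment as a base-2 exponent, and it is only printed when the IR alignment is below the instruction's natural alignment; anything at or above natural is dropped, as the a16 and a32 cases above show. A minimal untested sketch (function name hypothetical):

  define <16 x i8> @underaligned_sketch(ptr %p) {
    ; align 4 is below v128.load's natural 16, so the backend emits
    ; an explicit hint: v128.load 0:p2align=2, where 2 is log2(4).
    %v = load <16 x i8>, ptr %p, align 4
    ret <16 x i8> %v
  }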
 
@@ -193,53 +193,53 @@ define void @store_lane_i8_a2(<16 x i8> %v, i8* %p) {
 ; 8 x i16
 ; ==============================================================================
 
-define <8 x i16> @load_v8i16_a1(<8 x i16> *%p) {
+define <8 x i16> @load_v8i16_a1(ptr %p) {
 ; CHECK-LABEL: load_v8i16_a1:
 ; CHECK:         .functype load_v8i16_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i16>, <8 x i16>* %p, align 1
+  %v = load <8 x i16>, ptr %p, align 1
   ret <8 x i16> %v
 }
 
-define <8 x i16> @load_v8i16_a4(<8 x i16> *%p) {
+define <8 x i16> @load_v8i16_a4(ptr %p) {
 ; CHECK-LABEL: load_v8i16_a4:
 ; CHECK:         .functype load_v8i16_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i16>, <8 x i16>* %p, align 4
+  %v = load <8 x i16>, ptr %p, align 4
   ret <8 x i16> %v
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define <8 x i16> @load_v8i16_a16(<8 x i16> *%p) {
+define <8 x i16> @load_v8i16_a16(ptr %p) {
 ; CHECK-LABEL: load_v8i16_a16:
 ; CHECK:         .functype load_v8i16_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i16>, <8 x i16>* %p, align 16
+  %v = load <8 x i16>, ptr %p, align 16
   ret <8 x i16> %v
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define <8 x i16> @load_v8i16_a32(<8 x i16> *%p) {
+define <8 x i16> @load_v8i16_a32(ptr %p) {
 ; CHECK-LABEL: load_v8i16_a32:
 ; CHECK:         .functype load_v8i16_a32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i16>, <8 x i16>* %p, align 32
+  %v = load <8 x i16>, ptr %p, align 32
   ret <8 x i16> %v
 }
 
-define void @store_v8i16_a1(<8 x i16> *%p, <8 x i16> %v) {
+define void @store_v8i16_a1(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: store_v8i16_a1:
 ; CHECK:         .functype store_v8i16_a1 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -247,11 +247,11 @@ define void @store_v8i16_a1(<8 x i16> *%p, <8 x i16> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  store <8 x i16> %v, <8 x i16>* %p, align 1
+  store <8 x i16> %v, ptr %p, align 1
   ret void
 }
 
-define void @store_v8i16_a4(<8 x i16> *%p, <8 x i16> %v) {
+define void @store_v8i16_a4(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: store_v8i16_a4:
 ; CHECK:         .functype store_v8i16_a4 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -259,12 +259,12 @@ define void @store_v8i16_a4(<8 x i16> *%p, <8 x i16> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  store <8 x i16> %v, <8 x i16>* %p, align 4
+  store <8 x i16> %v, ptr %p, align 4
   ret void
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define void @store_v8i16_a16(<8 x i16> *%p, <8 x i16> %v) {
+define void @store_v8i16_a16(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: store_v8i16_a16:
 ; CHECK:         .functype store_v8i16_a16 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -272,12 +272,12 @@ define void @store_v8i16_a16(<8 x i16> *%p, <8 x i16> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <8 x i16> %v, <8 x i16>* %p, align 16
+  store <8 x i16> %v, ptr %p, align 16
   ret void
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define void @store_v8i16_a32(<8 x i16> *%p, <8 x i16> %v) {
+define void @store_v8i16_a32(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: store_v8i16_a32:
 ; CHECK:         .functype store_v8i16_a32 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -285,171 +285,171 @@ define void @store_v8i16_a32(<8 x i16> *%p, <8 x i16> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <8 x i16> %v, <8 x i16>* %p, align 32
+  store <8 x i16> %v, ptr %p, align 32
   ret void
 }
 
-define <8 x i8> @load_ext_v8i16_a1(<8 x i8>* %p) {
+define <8 x i8> @load_ext_v8i16_a1(ptr %p) {
 ; CHECK-LABEL: load_ext_v8i16_a1:
 ; CHECK:         .functype load_ext_v8i16_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p, align 1
+  %v = load <8 x i8>, ptr %p, align 1
   ret <8 x i8> %v
 }
 
-define <8 x i8> @load_ext_v8i16_a2(<8 x i8>* %p) {
+define <8 x i8> @load_ext_v8i16_a2(ptr %p) {
 ; CHECK-LABEL: load_ext_v8i16_a2:
 ; CHECK:         .functype load_ext_v8i16_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0:p2align=1
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p, align 2
+  %v = load <8 x i8>, ptr %p, align 2
   ret <8 x i8> %v
 }
 
-define <8 x i8> @load_ext_v8i16_a4(<8 x i8>* %p) {
+define <8 x i8> @load_ext_v8i16_a4(ptr %p) {
 ; CHECK-LABEL: load_ext_v8i16_a4:
 ; CHECK:         .functype load_ext_v8i16_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p, align 4
+  %v = load <8 x i8>, ptr %p, align 4
   ret <8 x i8> %v
 }
 
 ; 8 is the default alignment for v128 extending load so no attribute is needed.
-define <8 x i8> @load_ext_v8i16_a8(<8 x i8>* %p) {
+define <8 x i8> @load_ext_v8i16_a8(ptr %p) {
 ; CHECK-LABEL: load_ext_v8i16_a8:
 ; CHECK:         .functype load_ext_v8i16_a8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p, align 8
+  %v = load <8 x i8>, ptr %p, align 8
   ret <8 x i8> %v
 }
 
 ; 16 is greater than the default alignment so it is ignored.
-define <8 x i8> @load_ext_v8i16_a16(<8 x i8>* %p) {
+define <8 x i8> @load_ext_v8i16_a16(ptr %p) {
 ; CHECK-LABEL: load_ext_v8i16_a16:
 ; CHECK:         .functype load_ext_v8i16_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p, align 16
+  %v = load <8 x i8>, ptr %p, align 16
   ret <8 x i8> %v
 }
 
-define <8 x i16> @load_sext_v8i16_a1(<8 x i8>* %p) {
+define <8 x i16> @load_sext_v8i16_a1(ptr %p) {
 ; CHECK-LABEL: load_sext_v8i16_a1:
 ; CHECK:         .functype load_sext_v8i16_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i16x8.load8x8_s 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p, align 1
+  %v = load <8 x i8>, ptr %p, align 1
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_sext_v8i16_a2(<8 x i8>* %p) {
+define <8 x i16> @load_sext_v8i16_a2(ptr %p) {
 ; CHECK-LABEL: load_sext_v8i16_a2:
 ; CHECK:         .functype load_sext_v8i16_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i16x8.load8x8_s 0:p2align=1
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p, align 2
+  %v = load <8 x i8>, ptr %p, align 2
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_sext_v8i16_a4(<8 x i8>* %p) {
+define <8 x i16> @load_sext_v8i16_a4(ptr %p) {
 ; CHECK-LABEL: load_sext_v8i16_a4:
 ; CHECK:         .functype load_sext_v8i16_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i16x8.load8x8_s 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p, align 4
+  %v = load <8 x i8>, ptr %p, align 4
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
 ; 8 is the default alignment for v128 extending load so no attribute is needed.
-define <8 x i16> @load_sext_v8i16_a8(<8 x i8>* %p) {
+define <8 x i16> @load_sext_v8i16_a8(ptr %p) {
 ; CHECK-LABEL: load_sext_v8i16_a8:
 ; CHECK:         .functype load_sext_v8i16_a8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i16x8.load8x8_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p, align 8
+  %v = load <8 x i8>, ptr %p, align 8
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
 ; 16 is greater than the default alignment so it is ignored.
-define <8 x i16> @load_sext_v8i16_a16(<8 x i8>* %p) {
+define <8 x i16> @load_sext_v8i16_a16(ptr %p) {
 ; CHECK-LABEL: load_sext_v8i16_a16:
 ; CHECK:         .functype load_sext_v8i16_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i16x8.load8x8_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p, align 16
+  %v = load <8 x i8>, ptr %p, align 16
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_splat_v8i16_a1(i16* %p) {
+define <8 x i16> @load_splat_v8i16_a1(ptr %p) {
 ; CHECK-LABEL: load_splat_v8i16_a1:
 ; CHECK:         .functype load_splat_v8i16_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load16_splat 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i16, i16* %p, align 1
+  %e = load i16, ptr %p, align 1
   %v1 = insertelement <8 x i16> undef, i16 %e, i32 0
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %v2
 }
 
 ; 2 is the default alignment for v128.load16_splat so no attribute is needed.
-define <8 x i16> @load_splat_v8i16_a2(i16* %p) {
+define <8 x i16> @load_splat_v8i16_a2(ptr %p) {
 ; CHECK-LABEL: load_splat_v8i16_a2:
 ; CHECK:         .functype load_splat_v8i16_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load16_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i16, i16* %p, align 2
+  %e = load i16, ptr %p, align 2
   %v1 = insertelement <8 x i16> undef, i16 %e, i32 0
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %v2
 }
 
 ; 4 is greater than the default alignment so it is ignored.
-define <8 x i16> @load_splat_v8i16_a4(i16* %p) {
+define <8 x i16> @load_splat_v8i16_a4(ptr %p) {
 ; CHECK-LABEL: load_splat_v8i16_a4:
 ; CHECK:         .functype load_splat_v8i16_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load16_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i16, i16* %p, align 4
+  %e = load i16, ptr %p, align 4
   %v1 = insertelement <8 x i16> undef, i16 %e, i32 0
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_lane_i16_a1(i16* %p, <8 x i16> %v) {
+define <8 x i16> @load_lane_i16_a1(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: load_lane_i16_a1:
 ; CHECK:         .functype load_lane_i16_a1 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -457,13 +457,13 @@ define <8 x i16> @load_lane_i16_a1(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i16, i16* %p, align 1
+  %e = load i16, ptr %p, align 1
   %v1 = insertelement <8 x i16> %v, i16 %e, i32 0
   ret <8 x i16> %v1
 }
 
 ; 2 is the default alignment for v128.load16_lane so no attribute is needed.
-define <8 x i16> @load_lane_i16_a2(i16* %p, <8 x i16> %v) {
+define <8 x i16> @load_lane_i16_a2(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: load_lane_i16_a2:
 ; CHECK:         .functype load_lane_i16_a2 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -471,13 +471,13 @@ define <8 x i16> @load_lane_i16_a2(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i16, i16* %p, align 2
+  %e = load i16, ptr %p, align 2
   %v1 = insertelement <8 x i16> %v, i16 %e, i32 0
   ret <8 x i16> %v1
 }
 
 ; 4 is greater than the default alignment so it is ignored.
-define <8 x i16> @load_lane_i16_a4(i16* %p, <8 x i16> %v) {
+define <8 x i16> @load_lane_i16_a4(ptr %p, <8 x i16> %v) {
 ; CHECK-LABEL: load_lane_i16_a4:
 ; CHECK:         .functype load_lane_i16_a4 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -485,12 +485,12 @@ define <8 x i16> @load_lane_i16_a4(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i16, i16* %p, align 4
+  %e = load i16, ptr %p, align 4
   %v1 = insertelement <8 x i16> %v, i16 %e, i32 0
   ret <8 x i16> %v1
 }
 
-define void @store_lane_i16_a1(<8 x i16> %v, i16* %p) {
+define void @store_lane_i16_a1(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i16_a1:
 ; CHECK:         .functype store_lane_i16_a1 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -499,12 +499,12 @@ define void @store_lane_i16_a1(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    v128.store16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <8 x i16> %v, i32 0
-  store i16 %x, i16* %p, align 1
+  store i16 %x, ptr %p, align 1
   ret void
 }
 
 ; 2 is the default alignment for v128.store16_lane so no attribute is needed.
-define void @store_lane_i16_a2(<8 x i16> %v, i16* %p) {
+define void @store_lane_i16_a2(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i16_a2:
 ; CHECK:         .functype store_lane_i16_a2 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -513,12 +513,12 @@ define void @store_lane_i16_a2(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    v128.store16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <8 x i16> %v, i32 0
-  store i16 %x, i16* %p, align 2
+  store i16 %x, ptr %p, align 2
   ret void
 }
 
 ; 4 is greater than the default alignment so it is ignored.
-define void @store_lane_i16_a4(<8 x i16> %v, i16* %p) {
+define void @store_lane_i16_a4(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i16_a4:
 ; CHECK:         .functype store_lane_i16_a4 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -527,7 +527,7 @@ define void @store_lane_i16_a4(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    v128.store16_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <8 x i16> %v, i32 0
-  store i16 %x, i16* %p, align 4
+  store i16 %x, ptr %p, align 4
   ret void
 }
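
Natural alignment here is a property of the selected instruction, not of the in-register type: the 8x8 extending loads and the 64-bit zero-filling loads touch only 8 bytes of memory, so their default is 8 rather than 16, which is why the a8 cases above carry no hint. A minimal untested sketch (function name hypothetical):

  define <8 x i16> @sext_underaligned_sketch(ptr %p) {
    ; the extending load reads 8 bytes; align 2 is below its natural 8,
    ; so this selects to i16x8.load8x8_s 0:p2align=1.
    %v = load <8 x i8>, ptr %p, align 2
    %v2 = sext <8 x i8> %v to <8 x i16>
    ret <8 x i16> %v2
  }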
 
@@ -535,53 +535,53 @@ define void @store_lane_i16_a4(<8 x i16> %v, i16* %p) {
 ; 4 x i32
 ; ==============================================================================
 
-define <4 x i32> @load_v4i32_a1(<4 x i32> *%p) {
+define <4 x i32> @load_v4i32_a1(ptr %p) {
 ; CHECK-LABEL: load_v4i32_a1:
 ; CHECK:         .functype load_v4i32_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i32>, <4 x i32>* %p, align 1
+  %v = load <4 x i32>, ptr %p, align 1
   ret <4 x i32> %v
 }
 
-define <4 x i32> @load_v4i32_a4(<4 x i32> *%p) {
+define <4 x i32> @load_v4i32_a4(ptr %p) {
 ; CHECK-LABEL: load_v4i32_a4:
 ; CHECK:         .functype load_v4i32_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i32>, <4 x i32>* %p, align 4
+  %v = load <4 x i32>, ptr %p, align 4
   ret <4 x i32> %v
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define <4 x i32> @load_v4i32_a16(<4 x i32> *%p) {
+define <4 x i32> @load_v4i32_a16(ptr %p) {
 ; CHECK-LABEL: load_v4i32_a16:
 ; CHECK:         .functype load_v4i32_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i32>, <4 x i32>* %p, align 16
+  %v = load <4 x i32>, ptr %p, align 16
   ret <4 x i32> %v
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define <4 x i32> @load_v4i32_a32(<4 x i32> *%p) {
+define <4 x i32> @load_v4i32_a32(ptr %p) {
 ; CHECK-LABEL: load_v4i32_a32:
 ; CHECK:         .functype load_v4i32_a32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i32>, <4 x i32>* %p, align 32
+  %v = load <4 x i32>, ptr %p, align 32
   ret <4 x i32> %v
 }
 
-define void @store_v4i32_a1(<4 x i32> *%p, <4 x i32> %v) {
+define void @store_v4i32_a1(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: store_v4i32_a1:
 ; CHECK:         .functype store_v4i32_a1 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -589,11 +589,11 @@ define void @store_v4i32_a1(<4 x i32> *%p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x i32> %v, <4 x i32>* %p, align 1
+  store <4 x i32> %v, ptr %p, align 1
   ret void
 }
 
-define void @store_v4i32_a4(<4 x i32> *%p, <4 x i32> %v) {
+define void @store_v4i32_a4(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: store_v4i32_a4:
 ; CHECK:         .functype store_v4i32_a4 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -601,12 +601,12 @@ define void @store_v4i32_a4(<4 x i32> *%p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x i32> %v, <4 x i32>* %p, align 4
+  store <4 x i32> %v, ptr %p, align 4
   ret void
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define void @store_v4i32_a16(<4 x i32> *%p, <4 x i32> %v) {
+define void @store_v4i32_a16(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: store_v4i32_a16:
 ; CHECK:         .functype store_v4i32_a16 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -614,12 +614,12 @@ define void @store_v4i32_a16(<4 x i32> *%p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x i32> %v, <4 x i32>* %p, align 16
+  store <4 x i32> %v, ptr %p, align 16
   ret void
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define void @store_v4i32_a32(<4 x i32> *%p, <4 x i32> %v) {
+define void @store_v4i32_a32(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: store_v4i32_a32:
 ; CHECK:         .functype store_v4i32_a32 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -627,184 +627,184 @@ define void @store_v4i32_a32(<4 x i32> *%p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x i32> %v, <4 x i32>* %p, align 32
+  store <4 x i32> %v, ptr %p, align 32
   ret void
 }
 
-define <4 x i16> @load_ext_v4i32_a1(<4 x i16>* %p) {
+define <4 x i16> @load_ext_v4i32_a1(ptr %p) {
 ; CHECK-LABEL: load_ext_v4i32_a1:
 ; CHECK:         .functype load_ext_v4i32_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p, align 1
+  %v = load <4 x i16>, ptr %p, align 1
   ret <4 x i16> %v
 }
 
-define <4 x i16> @load_ext_v4i32_a2(<4 x i16>* %p) {
+define <4 x i16> @load_ext_v4i32_a2(ptr %p) {
 ; CHECK-LABEL: load_ext_v4i32_a2:
 ; CHECK:         .functype load_ext_v4i32_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0:p2align=1
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p, align 2
+  %v = load <4 x i16>, ptr %p, align 2
   ret <4 x i16> %v
 }
 
-define <4 x i16> @load_ext_v4i32_a4(<4 x i16>* %p) {
+define <4 x i16> @load_ext_v4i32_a4(ptr %p) {
 ; CHECK-LABEL: load_ext_v4i32_a4:
 ; CHECK:         .functype load_ext_v4i32_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p, align 4
+  %v = load <4 x i16>, ptr %p, align 4
   ret <4 x i16> %v
 }
 
 ; 8 is the default alignment for v128 extending load so no attribute is needed.
-define <4 x i16> @load_ext_v4i32_a8(<4 x i16>* %p) {
+define <4 x i16> @load_ext_v4i32_a8(ptr %p) {
 ; CHECK-LABEL: load_ext_v4i32_a8:
 ; CHECK:         .functype load_ext_v4i32_a8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p, align 8
+  %v = load <4 x i16>, ptr %p, align 8
   ret <4 x i16> %v
 }
 
 ; 16 is greater than the default alignment so it is ignored.
-define <4 x i16> @load_ext_v4i32_a16(<4 x i16>* %p) {
+define <4 x i16> @load_ext_v4i32_a16(ptr %p) {
 ; CHECK-LABEL: load_ext_v4i32_a16:
 ; CHECK:         .functype load_ext_v4i32_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p, align 16
+  %v = load <4 x i16>, ptr %p, align 16
   ret <4 x i16> %v
 }
 
-define <4 x i32> @load_sext_v4i32_a1(<4 x i16>* %p) {
+define <4 x i32> @load_sext_v4i32_a1(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i32_a1:
 ; CHECK:         .functype load_sext_v4i32_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32x4.load16x4_s 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p, align 1
+  %v = load <4 x i16>, ptr %p, align 1
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i32_a2(<4 x i16>* %p) {
+define <4 x i32> @load_sext_v4i32_a2(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i32_a2:
 ; CHECK:         .functype load_sext_v4i32_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32x4.load16x4_s 0:p2align=1
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p, align 2
+  %v = load <4 x i16>, ptr %p, align 2
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i32_a4(<4 x i16>* %p) {
+define <4 x i32> @load_sext_v4i32_a4(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i32_a4:
 ; CHECK:         .functype load_sext_v4i32_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32x4.load16x4_s 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p, align 4
+  %v = load <4 x i16>, ptr %p, align 4
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
 ; 8 is the default alignment for v128 extending load so no attribute is needed.
-define <4 x i32> @load_sext_v4i32_a8(<4 x i16>* %p) {
+define <4 x i32> @load_sext_v4i32_a8(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i32_a8:
 ; CHECK:         .functype load_sext_v4i32_a8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32x4.load16x4_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p, align 8
+  %v = load <4 x i16>, ptr %p, align 8
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
 ; 16 is greater than the default alignment so it is ignored.
-define <4 x i32> @load_sext_v4i32_a16(<4 x i16>* %p) {
+define <4 x i32> @load_sext_v4i32_a16(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i32_a16:
 ; CHECK:         .functype load_sext_v4i32_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32x4.load16x4_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p, align 16
+  %v = load <4 x i16>, ptr %p, align 16
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_splat_v4i32_a1(i32* %addr) {
+define <4 x i32> @load_splat_v4i32_a1(ptr %addr) {
 ; CHECK-LABEL: load_splat_v4i32_a1:
 ; CHECK:         .functype load_splat_v4i32_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_splat 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i32, i32* %addr, align 1
+  %e = load i32, ptr %addr, align 1
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_splat_v4i32_a2(i32* %addr) {
+define <4 x i32> @load_splat_v4i32_a2(ptr %addr) {
 ; CHECK-LABEL: load_splat_v4i32_a2:
 ; CHECK:         .functype load_splat_v4i32_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_splat 0:p2align=1
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i32, i32* %addr, align 2
+  %e = load i32, ptr %addr, align 2
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
 }
 
 ; 4 is the default alignment for v128.load32_splat so no attribute is needed.
-define <4 x i32> @load_splat_v4i32_a4(i32* %addr) {
+define <4 x i32> @load_splat_v4i32_a4(ptr %addr) {
 ; CHECK-LABEL: load_splat_v4i32_a4:
 ; CHECK:         .functype load_splat_v4i32_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i32, i32* %addr, align 4
+  %e = load i32, ptr %addr, align 4
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
 }
 
 ; 8 is greater than the default alignment so it is ignored.
-define <4 x i32> @load_splat_v4i32_a8(i32* %addr) {
+define <4 x i32> @load_splat_v4i32_a8(ptr %addr) {
 ; CHECK-LABEL: load_splat_v4i32_a8:
 ; CHECK:         .functype load_splat_v4i32_a8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i32, i32* %addr, align 8
+  %e = load i32, ptr %addr, align 8
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_lane_i32_a1(i32* %p, <4 x i32> %v) {
+define <4 x i32> @load_lane_i32_a1(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: load_lane_i32_a1:
 ; CHECK:         .functype load_lane_i32_a1 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -812,12 +812,12 @@ define <4 x i32> @load_lane_i32_a1(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i32, i32* %p, align 1
+  %e = load i32, ptr %p, align 1
   %v1 = insertelement <4 x i32> %v, i32 %e, i32 0
   ret <4 x i32> %v1
 }
 
-define <4 x i32> @load_lane_i32_a2(i32* %p, <4 x i32> %v) {
+define <4 x i32> @load_lane_i32_a2(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: load_lane_i32_a2:
 ; CHECK:         .functype load_lane_i32_a2 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -825,13 +825,13 @@ define <4 x i32> @load_lane_i32_a2(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load32_lane 0:p2align=1, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i32, i32* %p, align 2
+  %e = load i32, ptr %p, align 2
   %v1 = insertelement <4 x i32> %v, i32 %e, i32 0
   ret <4 x i32> %v1
 }
 
 ; 4 is the default alignment for v128.load32_lane so no attribute is needed.
-define <4 x i32> @load_lane_i32_a4(i32* %p, <4 x i32> %v) {
+define <4 x i32> @load_lane_i32_a4(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: load_lane_i32_a4:
 ; CHECK:         .functype load_lane_i32_a4 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -839,13 +839,13 @@ define <4 x i32> @load_lane_i32_a4(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i32, i32* %p, align 4
+  %e = load i32, ptr %p, align 4
   %v1 = insertelement <4 x i32> %v, i32 %e, i32 0
   ret <4 x i32> %v1
 }
 
 ; 8 is greater than the default alignment so it is ignored.
-define <4 x i32> @load_lane_i32_a8(i32* %p, <4 x i32> %v) {
+define <4 x i32> @load_lane_i32_a8(ptr %p, <4 x i32> %v) {
 ; CHECK-LABEL: load_lane_i32_a8:
 ; CHECK:         .functype load_lane_i32_a8 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -853,12 +853,12 @@ define <4 x i32> @load_lane_i32_a8(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i32, i32* %p, align 8
+  %e = load i32, ptr %p, align 8
   %v1 = insertelement <4 x i32> %v, i32 %e, i32 0
   ret <4 x i32> %v1
 }
 
-define void @store_lane_i32_a1(<4 x i32> %v, i32* %p) {
+define void @store_lane_i32_a1(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i32_a1:
 ; CHECK:         .functype store_lane_i32_a1 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -867,11 +867,11 @@ define void @store_lane_i32_a1(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    v128.store32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* %p, align 1
+  store i32 %x, ptr %p, align 1
   ret void
 }
 
-define void @store_lane_i32_a2(<4 x i32> %v, i32* %p) {
+define void @store_lane_i32_a2(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i32_a2:
 ; CHECK:         .functype store_lane_i32_a2 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -880,12 +880,12 @@ define void @store_lane_i32_a2(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    v128.store32_lane 0:p2align=1, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* %p, align 2
+  store i32 %x, ptr %p, align 2
   ret void
 }
 
 ; 4 is the default alignment for v128.store32_lane so no attribute is needed.
-define void @store_lane_i32_a4(<4 x i32> %v, i32* %p) {
+define void @store_lane_i32_a4(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i32_a4:
 ; CHECK:         .functype store_lane_i32_a4 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -894,12 +894,12 @@ define void @store_lane_i32_a4(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    v128.store32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* %p, align 4
+  store i32 %x, ptr %p, align 4
   ret void
 }
 
 ; 8 is greater than the default alignment so it is ignored.
-define void @store_lane_i32_a8(<4 x i32> %v, i32* %p) {
+define void @store_lane_i32_a8(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i32_a8:
 ; CHECK:         .functype store_lane_i32_a8 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -908,56 +908,56 @@ define void @store_lane_i32_a8(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    v128.store32_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <4 x i32> %v, i32 0
-  store i32 %x, i32* %p, align 8
+  store i32 %x, ptr %p, align 8
   ret void
 }
 
-define <4 x i32> @load_zero_i32_a1(i32* %p) {
+define <4 x i32> @load_zero_i32_a1(ptr %p) {
 ; CHECK-LABEL: load_zero_i32_a1:
 ; CHECK:         .functype load_zero_i32_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i32, i32* %p, align 1
+  %x = load i32, ptr %p, align 1
   %v = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %v
 }
 
-define <4 x i32> @load_zero_i32_a2(i32* %p) {
+define <4 x i32> @load_zero_i32_a2(ptr %p) {
 ; CHECK-LABEL: load_zero_i32_a2:
 ; CHECK:         .functype load_zero_i32_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_zero 0:p2align=1
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i32, i32* %p, align 2
+  %x = load i32, ptr %p, align 2
   %v = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %v
 }
 
 ; 4 is the default alignment for v128.load32_zero so no attribute is needed.
-define <4 x i32> @load_zero_i32_a4(i32* %p) {
+define <4 x i32> @load_zero_i32_a4(ptr %p) {
 ; CHECK-LABEL: load_zero_i32_a4:
 ; CHECK:         .functype load_zero_i32_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i32, i32* %p, align 4
+  %x = load i32, ptr %p, align 4
   %v = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %v
 }
 
 ; 8 is greater than the default alignment so it is ignored.
-define <4 x i32> @load_zero_i32_a8(i32* %p) {
+define <4 x i32> @load_zero_i32_a8(ptr %p) {
 ; CHECK-LABEL: load_zero_i32_a8:
 ; CHECK:         .functype load_zero_i32_a8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i32, i32* %p, align 8
+  %x = load i32, ptr %p, align 8
   %v = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %v
 }
@@ -966,53 +966,53 @@ define <4 x i32> @load_zero_i32_a8(i32* %p) {
 ; 2 x i64
 ; ==============================================================================
 
-define <2 x i64> @load_v2i64_a1(<2 x i64> *%p) {
+define <2 x i64> @load_v2i64_a1(ptr %p) {
 ; CHECK-LABEL: load_v2i64_a1:
 ; CHECK:         .functype load_v2i64_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i64>, <2 x i64>* %p, align 1
+  %v = load <2 x i64>, ptr %p, align 1
   ret <2 x i64> %v
 }
 
-define <2 x i64> @load_v2i64_a4(<2 x i64> *%p) {
+define <2 x i64> @load_v2i64_a4(ptr %p) {
 ; CHECK-LABEL: load_v2i64_a4:
 ; CHECK:         .functype load_v2i64_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i64>, <2 x i64>* %p, align 4
+  %v = load <2 x i64>, ptr %p, align 4
   ret <2 x i64> %v
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define <2 x i64> @load_v2i64_a16(<2 x i64> *%p) {
+define <2 x i64> @load_v2i64_a16(ptr %p) {
 ; CHECK-LABEL: load_v2i64_a16:
 ; CHECK:         .functype load_v2i64_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i64>, <2 x i64>* %p, align 16
+  %v = load <2 x i64>, ptr %p, align 16
   ret <2 x i64> %v
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define <2 x i64> @load_v2i64_a32(<2 x i64> *%p) {
+define <2 x i64> @load_v2i64_a32(ptr %p) {
 ; CHECK-LABEL: load_v2i64_a32:
 ; CHECK:         .functype load_v2i64_a32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i64>, <2 x i64>* %p, align 32
+  %v = load <2 x i64>, ptr %p, align 32
   ret <2 x i64> %v
 }
 
-define void @store_v2i64_a1(<2 x i64> *%p, <2 x i64> %v) {
+define void @store_v2i64_a1(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: store_v2i64_a1:
 ; CHECK:         .functype store_v2i64_a1 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1020,11 +1020,11 @@ define void @store_v2i64_a1(<2 x i64> *%p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x i64> %v, <2 x i64>* %p, align 1
+  store <2 x i64> %v, ptr %p, align 1
   ret void
 }
 
-define void @store_v2i64_a4(<2 x i64> *%p, <2 x i64> %v) {
+define void @store_v2i64_a4(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: store_v2i64_a4:
 ; CHECK:         .functype store_v2i64_a4 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1032,12 +1032,12 @@ define void @store_v2i64_a4(<2 x i64> *%p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x i64> %v, <2 x i64>* %p, align 4
+  store <2 x i64> %v, ptr %p, align 4
   ret void
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define void @store_v2i64_a16(<2 x i64> *%p, <2 x i64> %v) {
+define void @store_v2i64_a16(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: store_v2i64_a16:
 ; CHECK:         .functype store_v2i64_a16 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1045,12 +1045,12 @@ define void @store_v2i64_a16(<2 x i64> *%p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x i64> %v, <2 x i64>* %p, align 16
+  store <2 x i64> %v, ptr %p, align 16
   ret void
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define void @store_v2i64_a32(<2 x i64> *%p, <2 x i64> %v) {
+define void @store_v2i64_a32(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: store_v2i64_a32:
 ; CHECK:         .functype store_v2i64_a32 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1058,78 +1058,78 @@ define void @store_v2i64_a32(<2 x i64> *%p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x i64> %v, <2 x i64>* %p, align 32
+  store <2 x i64> %v, ptr %p, align 32
   ret void
 }
 
-define <2 x i64> @load_splat_v2i64_a1(i64* %p) {
+define <2 x i64> @load_splat_v2i64_a1(ptr %p) {
 ; CHECK-LABEL: load_splat_v2i64_a1:
 ; CHECK:         .functype load_splat_v2i64_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_splat 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* %p, align 1
+  %e = load i64, ptr %p, align 1
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_splat_v2i64_a2(i64* %p) {
+define <2 x i64> @load_splat_v2i64_a2(ptr %p) {
 ; CHECK-LABEL: load_splat_v2i64_a2:
 ; CHECK:         .functype load_splat_v2i64_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_splat 0:p2align=1
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* %p, align 2
+  %e = load i64, ptr %p, align 2
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_splat_v2i64_a4(i64* %p) {
+define <2 x i64> @load_splat_v2i64_a4(ptr %p) {
 ; CHECK-LABEL: load_splat_v2i64_a4:
 ; CHECK:         .functype load_splat_v2i64_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_splat 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* %p, align 4
+  %e = load i64, ptr %p, align 4
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
 }
 
 ; 8 is the default alignment for v128.load64_splat so no attribute is needed.
-define <2 x i64> @load_splat_v2i64_a8(i64* %p) {
+define <2 x i64> @load_splat_v2i64_a8(ptr %p) {
 ; CHECK-LABEL: load_splat_v2i64_a8:
 ; CHECK:         .functype load_splat_v2i64_a8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* %p, align 8
+  %e = load i64, ptr %p, align 8
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
 }
 
 ; 16 is greater than the default alignment so it is ignored.
-define <2 x i64> @load_splat_v2i64_a16(i64* %p) {
+define <2 x i64> @load_splat_v2i64_a16(ptr %p) {
 ; CHECK-LABEL: load_splat_v2i64_a16:
 ; CHECK:         .functype load_splat_v2i64_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* %p, align 16
+  %e = load i64, ptr %p, align 16
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_lane_i64_a1(i64* %p, <2 x i64> %v) {
+define <2 x i64> @load_lane_i64_a1(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: load_lane_i64_a1:
 ; CHECK:         .functype load_lane_i64_a1 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1137,12 +1137,12 @@ define <2 x i64> @load_lane_i64_a1(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* %p, align 1
+  %e = load i64, ptr %p, align 1
   %v1 = insertelement <2 x i64> %v, i64 %e, i32 0
   ret <2 x i64> %v1
 }
 
-define <2 x i64> @load_lane_i64_a2(i64* %p, <2 x i64> %v) {
+define <2 x i64> @load_lane_i64_a2(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: load_lane_i64_a2:
 ; CHECK:         .functype load_lane_i64_a2 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1150,12 +1150,12 @@ define <2 x i64> @load_lane_i64_a2(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load64_lane 0:p2align=1, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* %p, align 2
+  %e = load i64, ptr %p, align 2
   %v1 = insertelement <2 x i64> %v, i64 %e, i32 0
   ret <2 x i64> %v1
 }
 
-define <2 x i64> @load_lane_i64_a4(i64* %p, <2 x i64> %v) {
+define <2 x i64> @load_lane_i64_a4(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: load_lane_i64_a4:
 ; CHECK:         .functype load_lane_i64_a4 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1163,13 +1163,13 @@ define <2 x i64> @load_lane_i64_a4(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load64_lane 0:p2align=2, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* %p, align 4
+  %e = load i64, ptr %p, align 4
   %v1 = insertelement <2 x i64> %v, i64 %e, i32 0
   ret <2 x i64> %v1
 }
 
 ; 8 is the default alignment for v128.load64_lane so no attribute is needed.
-define <2 x i64> @load_lane_i64_a8(i64* %p, <2 x i64> %v) {
+define <2 x i64> @load_lane_i64_a8(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: load_lane_i64_a8:
 ; CHECK:         .functype load_lane_i64_a8 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1177,13 +1177,13 @@ define <2 x i64> @load_lane_i64_a8(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* %p, align 8
+  %e = load i64, ptr %p, align 8
   %v1 = insertelement <2 x i64> %v, i64 %e, i32 0
   ret <2 x i64> %v1
 }
 
 ; 16 is greater than the default alignment so it is ignored.
-define <2 x i64> @load_lane_i64_a16(i64* %p, <2 x i64> %v) {
+define <2 x i64> @load_lane_i64_a16(ptr %p, <2 x i64> %v) {
 ; CHECK-LABEL: load_lane_i64_a16:
 ; CHECK:         .functype load_lane_i64_a16 (i32, v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1191,12 +1191,12 @@ define <2 x i64> @load_lane_i64_a16(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.load64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* %p, align 16
+  %e = load i64, ptr %p, align 16
   %v1 = insertelement <2 x i64> %v, i64 %e, i32 0
   ret <2 x i64> %v1
 }
 
-define void @store_lane_i64_a1(<2 x i64> %v, i64* %p) {
+define void @store_lane_i64_a1(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i64_a1:
 ; CHECK:         .functype store_lane_i64_a1 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1205,11 +1205,11 @@ define void @store_lane_i64_a1(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    v128.store64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %p, align 1
+  store i64 %x, ptr %p, align 1
   ret void
 }
 
-define void @store_lane_i64_a2(<2 x i64> %v, i64* %p) {
+define void @store_lane_i64_a2(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i64_a2:
 ; CHECK:         .functype store_lane_i64_a2 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1218,11 +1218,11 @@ define void @store_lane_i64_a2(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    v128.store64_lane 0:p2align=1, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %p, align 2
+  store i64 %x, ptr %p, align 2
   ret void
 }
 
-define void @store_lane_i64_a4(<2 x i64> %v, i64* %p) {
+define void @store_lane_i64_a4(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i64_a4:
 ; CHECK:         .functype store_lane_i64_a4 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1231,12 +1231,12 @@ define void @store_lane_i64_a4(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    v128.store64_lane 0:p2align=2, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %p, align 4
+  store i64 %x, ptr %p, align 4
   ret void
 }
 
 ; 8 is the default alignment for v128.store64_lane so no attribute is needed.
-define void @store_lane_i64_a8(<2 x i64> %v, i64* %p) {
+define void @store_lane_i64_a8(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i64_a8:
 ; CHECK:         .functype store_lane_i64_a8 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1245,12 +1245,12 @@ define void @store_lane_i64_a8(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %p, align 8
+  store i64 %x, ptr %p, align 8
   ret void
 }
 
 ; 16 is greater than the default alignment so it is ignored.
-define void @store_lane_i64_a16(<2 x i64> %v, i64* %p) {
+define void @store_lane_i64_a16(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_lane_i64_a16:
 ; CHECK:         .functype store_lane_i64_a16 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1259,68 +1259,68 @@ define void @store_lane_i64_a16(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %x = extractelement <2 x i64> %v, i32 0
-  store i64 %x, i64* %p, align 16
+  store i64 %x, ptr %p, align 16
   ret void
 }
 
-define <2 x i64> @load_zero_i64_a1(i64* %p) {
+define <2 x i64> @load_zero_i64_a1(ptr %p) {
 ; CHECK-LABEL: load_zero_i64_a1:
 ; CHECK:         .functype load_zero_i64_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i64, i64* %p, align 1
+  %x = load i64, ptr %p, align 1
   %v = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %v
 }
 
-define <2 x i64> @load_zero_i64_a2(i64* %p) {
+define <2 x i64> @load_zero_i64_a2(ptr %p) {
 ; CHECK-LABEL: load_zero_i64_a2:
 ; CHECK:         .functype load_zero_i64_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0:p2align=1
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i64, i64* %p, align 2
+  %x = load i64, ptr %p, align 2
   %v = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %v
 }
 
-define <2 x i64> @load_zero_i64_a4(i64* %p) {
+define <2 x i64> @load_zero_i64_a4(ptr %p) {
 ; CHECK-LABEL: load_zero_i64_a4:
 ; CHECK:         .functype load_zero_i64_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i64, i64* %p, align 4
+  %x = load i64, ptr %p, align 4
   %v = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %v
 }
 
 ; 8 is the default alignment for v128.load64_zero so no attribute is needed.
-define <2 x i64> @load_zero_i64_a8(i64* %p) {
+define <2 x i64> @load_zero_i64_a8(ptr %p) {
 ; CHECK-LABEL: load_zero_i64_a8:
 ; CHECK:         .functype load_zero_i64_a8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i64, i64* %p, align 8
+  %x = load i64, ptr %p, align 8
   %v = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %v
 }
 
 ; 16 is greater than the default alignment so it is ignored.
-define <2 x i64> @load_zero_i64_a16(i64* %p) {
+define <2 x i64> @load_zero_i64_a16(ptr %p) {
 ; CHECK-LABEL: load_zero_i64_a16:
 ; CHECK:         .functype load_zero_i64_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i64, i64* %p, align 16
+  %x = load i64, ptr %p, align 16
   %v = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %v
 }
@@ -1329,53 +1329,53 @@ define <2 x i64> @load_zero_i64_a16(i64* %p) {
 ; 4 x float
 ; ==============================================================================
 
-define <4 x float> @load_v4f32_a1(<4 x float> *%p) {
+define <4 x float> @load_v4f32_a1(ptr %p) {
 ; CHECK-LABEL: load_v4f32_a1:
 ; CHECK:         .functype load_v4f32_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x float>, <4 x float>* %p, align 1
+  %v = load <4 x float>, ptr %p, align 1
   ret <4 x float> %v
 }
 
-define <4 x float> @load_v4f32_a4(<4 x float> *%p) {
+define <4 x float> @load_v4f32_a4(ptr %p) {
 ; CHECK-LABEL: load_v4f32_a4:
 ; CHECK:         .functype load_v4f32_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x float>, <4 x float>* %p, align 4
+  %v = load <4 x float>, ptr %p, align 4
   ret <4 x float> %v
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define <4 x float> @load_v4f32_a16(<4 x float> *%p) {
+define <4 x float> @load_v4f32_a16(ptr %p) {
 ; CHECK-LABEL: load_v4f32_a16:
 ; CHECK:         .functype load_v4f32_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x float>, <4 x float>* %p, align 16
+  %v = load <4 x float>, ptr %p, align 16
   ret <4 x float> %v
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define <4 x float> @load_v4f32_a32(<4 x float> *%p) {
+define <4 x float> @load_v4f32_a32(ptr %p) {
 ; CHECK-LABEL: load_v4f32_a32:
 ; CHECK:         .functype load_v4f32_a32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x float>, <4 x float>* %p, align 32
+  %v = load <4 x float>, ptr %p, align 32
   ret <4 x float> %v
 }
 
-define void @store_v4f32_a1(<4 x float> *%p, <4 x float> %v) {
+define void @store_v4f32_a1(ptr %p, <4 x float> %v) {
 ; CHECK-LABEL: store_v4f32_a1:
 ; CHECK:         .functype store_v4f32_a1 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1383,11 +1383,11 @@ define void @store_v4f32_a1(<4 x float> *%p, <4 x float> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x float> %v, <4 x float>* %p, align 1
+  store <4 x float> %v, ptr %p, align 1
   ret void
 }
 
-define void @store_v4f32_a4(<4 x float> *%p, <4 x float> %v) {
+define void @store_v4f32_a4(ptr %p, <4 x float> %v) {
 ; CHECK-LABEL: store_v4f32_a4:
 ; CHECK:         .functype store_v4f32_a4 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1395,12 +1395,12 @@ define void @store_v4f32_a4(<4 x float> *%p, <4 x float> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x float> %v, <4 x float>* %p, align 4
+  store <4 x float> %v, ptr %p, align 4
   ret void
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define void @store_v4f32_a16(<4 x float> *%p, <4 x float> %v) {
+define void @store_v4f32_a16(ptr %p, <4 x float> %v) {
 ; CHECK-LABEL: store_v4f32_a16:
 ; CHECK:         .functype store_v4f32_a16 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1408,12 +1408,12 @@ define void @store_v4f32_a16(<4 x float> *%p, <4 x float> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x float> %v, <4 x float>* %p, align 16
+  store <4 x float> %v, ptr %p, align 16
   ret void
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define void @store_v4f32_a32(<4 x float> *%p, <4 x float> %v) {
+define void @store_v4f32_a32(ptr %p, <4 x float> %v) {
 ; CHECK-LABEL: store_v4f32_a32:
 ; CHECK:         .functype store_v4f32_a32 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1421,7 +1421,7 @@ define void @store_v4f32_a32(<4 x float> *%p, <4 x float> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x float> %v, <4 x float>* %p, align 32
+  store <4 x float> %v, ptr %p, align 32
   ret void
 }
 
@@ -1429,53 +1429,53 @@ define void @store_v4f32_a32(<4 x float> *%p, <4 x float> %v) {
 ; 2 x double
 ; ==============================================================================
 
-define <2 x double> @load_v2f64_a1(<2 x double> *%p) {
+define <2 x double> @load_v2f64_a1(ptr %p) {
 ; CHECK-LABEL: load_v2f64_a1:
 ; CHECK:         .functype load_v2f64_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x double>, <2 x double>* %p, align 1
+  %v = load <2 x double>, ptr %p, align 1
   ret <2 x double> %v
 }
 
-define <2 x double> @load_v2f64_a4(<2 x double> *%p) {
+define <2 x double> @load_v2f64_a4(ptr %p) {
 ; CHECK-LABEL: load_v2f64_a4:
 ; CHECK:         .functype load_v2f64_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x double>, <2 x double>* %p, align 4
+  %v = load <2 x double>, ptr %p, align 4
   ret <2 x double> %v
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define <2 x double> @load_v2f64_a16(<2 x double> *%p) {
+define <2 x double> @load_v2f64_a16(ptr %p) {
 ; CHECK-LABEL: load_v2f64_a16:
 ; CHECK:         .functype load_v2f64_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x double>, <2 x double>* %p, align 16
+  %v = load <2 x double>, ptr %p, align 16
   ret <2 x double> %v
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define <2 x double> @load_v2f64_a32(<2 x double> *%p) {
+define <2 x double> @load_v2f64_a32(ptr %p) {
 ; CHECK-LABEL: load_v2f64_a32:
 ; CHECK:         .functype load_v2f64_a32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x double>, <2 x double>* %p, align 32
+  %v = load <2 x double>, ptr %p, align 32
   ret <2 x double> %v
 }
 
-define void @store_v2f64_a1(<2 x double> *%p, <2 x double> %v) {
+define void @store_v2f64_a1(ptr %p, <2 x double> %v) {
 ; CHECK-LABEL: store_v2f64_a1:
 ; CHECK:         .functype store_v2f64_a1 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1483,11 +1483,11 @@ define void @store_v2f64_a1(<2 x double> *%p, <2 x double> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x double> %v, <2 x double>* %p, align 1
+  store <2 x double> %v, ptr %p, align 1
   ret void
 }
 
-define void @store_v2f64_a4(<2 x double> *%p, <2 x double> %v) {
+define void @store_v2f64_a4(ptr %p, <2 x double> %v) {
 ; CHECK-LABEL: store_v2f64_a4:
 ; CHECK:         .functype store_v2f64_a4 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1495,12 +1495,12 @@ define void @store_v2f64_a4(<2 x double> *%p, <2 x double> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x double> %v, <2 x double>* %p, align 4
+  store <2 x double> %v, ptr %p, align 4
   ret void
 }
 
 ; 16 is the default alignment for v128 so no attribute is needed.
-define void @store_v2f64_a16(<2 x double> *%p, <2 x double> %v) {
+define void @store_v2f64_a16(ptr %p, <2 x double> %v) {
 ; CHECK-LABEL: store_v2f64_a16:
 ; CHECK:         .functype store_v2f64_a16 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1508,12 +1508,12 @@ define void @store_v2f64_a16(<2 x double> *%p, <2 x double> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x double> %v, <2 x double>* %p, align 16
+  store <2 x double> %v, ptr %p, align 16
   ret void
 }
 
 ; 32 is greater than the default alignment so it is ignored.
-define void @store_v2f64_a32(<2 x double> *%p, <2 x double> %v) {
+define void @store_v2f64_a32(ptr %p, <2 x double> %v) {
 ; CHECK-LABEL: store_v2f64_a32:
 ; CHECK:         .functype store_v2f64_a32 (i32, v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1521,6 +1521,6 @@ define void @store_v2f64_a32(<2 x double> *%p, <2 x double> %v) {
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x double> %v, <2 x double>* %p, align 32
+  store <2 x double> %v, ptr %p, align 32
   ret void
 }
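
The change in the hunks above is purely syntactic: under opaque pointers every
typed pointer (i16*, <4 x i32>*, and so on) becomes the single type ptr, and
the pointee type is carried by the memory operation alone. Alignment is
unaffected, which is why no CHECK line changes: align N still lowers to a
p2align=log2(N) attribute when N is below the instruction's natural alignment,
is omitted when N matches it, and is ignored when N exceeds it. A minimal
before/after sketch (hypothetical function @copy_v4i32, not one of the tests
above):

  ; Before (typed pointers):
  ;   %v = load <4 x i32>, <4 x i32>* %p, align 4
  ;   store <4 x i32> %v, <4 x i32>* %q, align 4

  ; After (opaque pointers):
  define void @copy_v4i32(ptr %p, ptr %q) {
    %v = load <4 x i32>, ptr %p, align 4    ; selects as v128.load 0:p2align=2
    store <4 x i32> %v, ptr %q, align 4     ; selects as v128.store 0:p2align=2
    ret void
  }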

diff --git a/llvm/test/CodeGen/WebAssembly/simd-load-zero-offset.ll b/llvm/test/CodeGen/WebAssembly/simd-load-zero-offset.ll
index 401bf463c776..81487f300cdf 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-load-zero-offset.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-load-zero-offset.ll
@@ -9,47 +9,47 @@ target triple = "wasm32-unknown-unknown"
 ; v128.load32_zero
 ;===----------------------------------------------------------------------------
 
-define <4 x i32> @load_zero_i32_no_offset(i32* %p) {
+define <4 x i32> @load_zero_i32_no_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i32_no_offset:
 ; CHECK:         .functype load_zero_i32_no_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i32, i32* %p
+  %x = load i32, ptr %p
   %v = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %v
 }
 
-define <4 x i32> @load_zero_i32_with_folded_offset(i32* %p) {
+define <4 x i32> @load_zero_i32_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i32_with_folded_offset:
 ; CHECK:         .functype load_zero_i32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_zero 24
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i32* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %x = load i32, i32* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %t
 }
 
-define <4 x i32> @load_zero_i32_with_folded_gep_offset(i32* %p) {
+define <4 x i32> @load_zero_i32_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i32_with_folded_gep_offset:
 ; CHECK:         .functype load_zero_i32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_zero 24
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i32, i32* %p, i32 6
-  %x = load i32, i32* %s
+  %s = getelementptr inbounds i32, ptr %p, i32 6
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %t
 }
 
-define <4 x i32> @load_zero_i32_with_unfolded_gep_negative_offset(i32* %p) {
+define <4 x i32> @load_zero_i32_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_zero_i32_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -58,13 +58,13 @@ define <4 x i32> @load_zero_i32_with_unfolded_gep_negative_offset(i32* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load32_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i32, i32* %p, i32 -6
-  %x = load i32, i32* %s
+  %s = getelementptr inbounds i32, ptr %p, i32 -6
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %t
 }
 
-define <4 x i32> @load_zero_i32_with_unfolded_offset(i32* %p) {
+define <4 x i32> @load_zero_i32_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i32_with_unfolded_offset:
 ; CHECK:         .functype load_zero_i32_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -73,15 +73,15 @@ define <4 x i32> @load_zero_i32_with_unfolded_offset(i32* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load32_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i32* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i32*
-  %x = load i32, i32* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %t
 }
 
-define <4 x i32> @load_zero_i32_with_unfolded_gep_offset(i32* %p) {
+define <4 x i32> @load_zero_i32_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i32_with_unfolded_gep_offset:
 ; CHECK:         .functype load_zero_i32_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -90,8 +90,8 @@ define <4 x i32> @load_zero_i32_with_unfolded_gep_offset(i32* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load32_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i32, i32* %p, i32 6
-  %x = load i32, i32* %s
+  %s = getelementptr i32, ptr %p, i32 6
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %t
 }
@@ -103,8 +103,8 @@ define <4 x i32> @load_zero_i32_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load32_zero 42
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 42 to i32*
-  %x = load i32, i32* %s
+  %s = inttoptr i32 42 to ptr
+  %x = load i32, ptr %s
   %t = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %t
 }
@@ -117,7 +117,7 @@ define <4 x i32> @load_zero_i32_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load32_zero gv_i32
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i32, i32* @gv_i32
+  %x = load i32, ptr @gv_i32
   %t = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
   ret <4 x i32> %t
 }
@@ -126,47 +126,47 @@ define <4 x i32> @load_zero_i32_from_global_address() {
 ; v128.load64_zero
 ;===----------------------------------------------------------------------------
 
-define <2 x i64> @load_zero_i64_no_offset(i64* %p) {
+define <2 x i64> @load_zero_i64_no_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i64_no_offset:
 ; CHECK:         .functype load_zero_i64_no_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i64, i64* %p
+  %x = load i64, ptr %p
   %v = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %v
 }
 
-define <2 x i64> @load_zero_i64_with_folded_offset(i64* %p) {
+define <2 x i64> @load_zero_i64_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i64_with_folded_offset:
 ; CHECK:         .functype load_zero_i64_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 24
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i64* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %x = load i64, i64* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %t
 }
 
-define <2 x i64> @load_zero_i64_with_folded_gep_offset(i64* %p) {
+define <2 x i64> @load_zero_i64_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i64_with_folded_gep_offset:
 ; CHECK:         .functype load_zero_i64_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 48
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i64, i64* %p, i64 6
-  %x = load i64, i64* %s
+  %s = getelementptr inbounds i64, ptr %p, i64 6
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %t
 }
 
-define <2 x i64> @load_zero_i64_with_unfolded_gep_negative_offset(i64* %p) {
+define <2 x i64> @load_zero_i64_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_zero_i64_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -175,13 +175,13 @@ define <2 x i64> @load_zero_i64_with_unfolded_gep_negative_offset(i64* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i64, i64* %p, i64 -6
-  %x = load i64, i64* %s
+  %s = getelementptr inbounds i64, ptr %p, i64 -6
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %t
 }
 
-define <2 x i64> @load_zero_i64_with_unfolded_offset(i64* %p) {
+define <2 x i64> @load_zero_i64_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i64_with_unfolded_offset:
 ; CHECK:         .functype load_zero_i64_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -190,15 +190,15 @@ define <2 x i64> @load_zero_i64_with_unfolded_offset(i64* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i64* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 24
-  %s = inttoptr i32 %r to i64*
-  %x = load i64, i64* %s
+  %s = inttoptr i32 %r to ptr
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %t
 }
 
-define <2 x i64> @load_zero_i64_with_unfolded_gep_offset(i64* %p) {
+define <2 x i64> @load_zero_i64_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zero_i64_with_unfolded_gep_offset:
 ; CHECK:         .functype load_zero_i64_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -207,8 +207,8 @@ define <2 x i64> @load_zero_i64_with_unfolded_gep_offset(i64* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i64, i64* %p, i64 6
-  %x = load i64, i64* %s
+  %s = getelementptr i64, ptr %p, i64 6
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %t
 }
@@ -220,8 +220,8 @@ define <2 x i64> @load_zero_i64_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_zero 42
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 42 to i64*
-  %x = load i64, i64* %s
+  %s = inttoptr i32 42 to ptr
+  %x = load i64, ptr %s
   %t = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %t
 }
@@ -234,7 +234,7 @@ define <2 x i64> @load_zero_i64_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_zero gv_i64
 ; CHECK-NEXT:    # fallthrough-return
-  %x = load i64, i64* @gv_i64
+  %x = load i64, ptr @gv_i64
   %t = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
   ret <2 x i64> %t
 }
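
The offset folding exercised above depends on wrap-around guarantees, not on
pointer types, so it survives the conversion unchanged: a constant reached via
add nuw, or via an inbounds GEP with a non-negative index, cannot wrap the
address and folds into the instruction's offset immediate, while add nsw or a
plain (or negative-index) GEP keeps an explicit i32.add with offset 0. With
opaque pointers the GEP's element type alone fixes the byte offset, e.g. index
6 over i64 is 6 * 8 = 48 bytes. A condensed sketch of the folding case,
mirroring @load_zero_i64_with_folded_gep_offset above (hypothetical function
name):

  define <2 x i64> @gep_fold_sketch(ptr %p) {
    ; inbounds + non-negative index: folds to v128.load64_zero 48
    %s = getelementptr inbounds i64, ptr %p, i64 6
    %x = load i64, ptr %s
    %t = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
    ret <2 x i64> %t
  }

Dropping inbounds (or using a negative index) removes the no-wrap proof, and
the same load selects as an explicit i32.const/i32.add followed by
v128.load64_zero 0, as in the unfolded tests above.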

diff --git a/llvm/test/CodeGen/WebAssembly/simd-offset.ll b/llvm/test/CodeGen/WebAssembly/simd-offset.ll
index 47ffc488eca3..f317edca549d 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-offset.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-offset.ll
@@ -8,87 +8,87 @@ target triple = "wasm32-unknown-unknown"
 ; ==============================================================================
 ; 16 x i8
 ; ==============================================================================
-define <16 x i8> @load_v16i8(<16 x i8>* %p) {
+define <16 x i8> @load_v16i8(ptr %p) {
 ; CHECK-LABEL: load_v16i8:
 ; CHECK:         .functype load_v16i8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <16 x i8>, <16 x i8>* %p
+  %v = load <16 x i8>, ptr %p
   ret <16 x i8> %v
 }
 
-define <16 x i8> @load_splat_v16i8(i8* %p) {
+define <16 x i8> @load_splat_v16i8(ptr %p) {
 ; CHECK-LABEL: load_splat_v16i8:
 ; CHECK:         .functype load_splat_v16i8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load8_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i8, i8* %p
+  %e = load i8, ptr %p
   %v1 = insertelement <16 x i8> undef, i8 %e, i32 0
   %v2 = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %v2
 }
 
-define <16 x i8> @load_v16i8_with_folded_offset(<16 x i8>* %p) {
+define <16 x i8> @load_v16i8_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_v16i8_with_folded_offset:
 ; CHECK:         .functype load_v16i8_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <16 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <16 x i8>*
-  %v = load <16 x i8>, <16 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <16 x i8>, ptr %s
   ret <16 x i8> %v
 }
 
-define <16 x i8> @load_splat_v16i8_with_folded_offset(i8* %p) {
+define <16 x i8> @load_splat_v16i8_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v16i8_with_folded_offset:
 ; CHECK:         .functype load_splat_v16i8_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load8_splat 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i8* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to i8*
-  %e = load i8, i8* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load i8, ptr %s
   %v1 = insertelement <16 x i8> undef, i8 %e, i32 0
   %v2 = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %v2
 }
 
-define <16 x i8> @load_v16i8_with_folded_gep_offset(<16 x i8>* %p) {
+define <16 x i8> @load_v16i8_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v16i8_with_folded_gep_offset:
 ; CHECK:         .functype load_v16i8_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <16 x i8>, <16 x i8>* %p, i32 1
-  %v = load <16 x i8>, <16 x i8>* %s
+  %s = getelementptr inbounds <16 x i8>, ptr %p, i32 1
+  %v = load <16 x i8>, ptr %s
   ret <16 x i8> %v
 }
 
-define <16 x i8> @load_splat_v16i8_with_folded_gep_offset(i8* %p) {
+define <16 x i8> @load_splat_v16i8_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v16i8_with_folded_gep_offset:
 ; CHECK:         .functype load_splat_v16i8_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load8_splat 1
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i8, i8* %p, i32 1
-  %e = load i8, i8* %s
+  %s = getelementptr inbounds i8, ptr %p, i32 1
+  %e = load i8, ptr %s
   %v1 = insertelement <16 x i8> undef, i8 %e, i32 0
   %v2 = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %v2
 }
 
-define <16 x i8> @load_v16i8_with_unfolded_gep_negative_offset(<16 x i8>* %p) {
+define <16 x i8> @load_v16i8_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_v16i8_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_v16i8_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -97,12 +97,12 @@ define <16 x i8> @load_v16i8_with_unfolded_gep_negative_offset(<16 x i8>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <16 x i8>, <16 x i8>* %p, i32 -1
-  %v = load <16 x i8>, <16 x i8>* %s
+  %s = getelementptr inbounds <16 x i8>, ptr %p, i32 -1
+  %v = load <16 x i8>, ptr %s
   ret <16 x i8> %v
 }
 
-define <16 x i8> @load_splat_v16i8_with_unfolded_gep_negative_offset(i8* %p) {
+define <16 x i8> @load_splat_v16i8_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v16i8_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_splat_v16i8_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -111,14 +111,14 @@ define <16 x i8> @load_splat_v16i8_with_unfolded_gep_negative_offset(i8* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load8_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i8, i8* %p, i32 -1
-  %e = load i8, i8* %s
+  %s = getelementptr inbounds i8, ptr %p, i32 -1
+  %e = load i8, ptr %s
   %v1 = insertelement <16 x i8> undef, i8 %e, i32 0
   %v2 = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %v2
 }
 
-define <16 x i8> @load_v16i8_with_unfolded_offset(<16 x i8>* %p) {
+define <16 x i8> @load_v16i8_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_v16i8_with_unfolded_offset:
 ; CHECK:         .functype load_v16i8_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -127,14 +127,14 @@ define <16 x i8> @load_v16i8_with_unfolded_offset(<16 x i8>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <16 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <16 x i8>*
-  %v = load <16 x i8>, <16 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <16 x i8>, ptr %s
   ret <16 x i8> %v
 }
 
-define <16 x i8> @load_splat_v16i8_with_unfolded_offset(i8* %p) {
+define <16 x i8> @load_splat_v16i8_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v16i8_with_unfolded_offset:
 ; CHECK:         .functype load_splat_v16i8_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -143,16 +143,16 @@ define <16 x i8> @load_splat_v16i8_with_unfolded_offset(i8* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load8_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i8* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to i8*
-  %e = load i8, i8* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load i8, ptr %s
   %v1 = insertelement <16 x i8> undef, i8 %e, i32 0
   %v2 = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %v2
 }
 
-define <16 x i8> @load_v16i8_with_unfolded_gep_offset(<16 x i8>* %p) {
+define <16 x i8> @load_v16i8_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v16i8_with_unfolded_gep_offset:
 ; CHECK:         .functype load_v16i8_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -161,12 +161,12 @@ define <16 x i8> @load_v16i8_with_unfolded_gep_offset(<16 x i8>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <16 x i8>, <16 x i8>* %p, i32 1
-  %v = load <16 x i8>, <16 x i8>* %s
+  %s = getelementptr <16 x i8>, ptr %p, i32 1
+  %v = load <16 x i8>, ptr %s
   ret <16 x i8> %v
 }
 
-define <16 x i8> @load_splat_v16i8_with_unfolded_gep_offset(i8* %p) {
+define <16 x i8> @load_splat_v16i8_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v16i8_with_unfolded_gep_offset:
 ; CHECK:         .functype load_splat_v16i8_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -175,8 +175,8 @@ define <16 x i8> @load_splat_v16i8_with_unfolded_gep_offset(i8* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load8_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i8, i8* %p, i32 1
-  %e = load i8, i8* %s
+  %s = getelementptr i8, ptr %p, i32 1
+  %e = load i8, ptr %s
   %v1 = insertelement <16 x i8> undef, i8 %e, i32 0
   %v2 = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %v2
@@ -189,8 +189,8 @@ define <16 x i8> @load_v16i8_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <16 x i8>*
-  %v = load <16 x i8>, <16 x i8>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <16 x i8>, ptr %s
   ret <16 x i8> %v
 }
 
@@ -201,8 +201,8 @@ define <16 x i8> @load_splat_v16i8_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load8_splat 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to i8*
-  %e = load i8, i8* %s
+  %s = inttoptr i32 32 to ptr
+  %e = load i8, ptr %s
   %v1 = insertelement <16 x i8> undef, i8 %e, i32 0
   %v2 = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %v2
@@ -216,7 +216,7 @@ define <16 x i8> @load_v16i8_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load gv_v16i8
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <16 x i8>, <16 x i8>* @gv_v16i8
+  %v = load <16 x i8>, ptr @gv_v16i8
   ret <16 x i8> %v
 }
 
@@ -228,13 +228,13 @@ define <16 x i8> @load_splat_v16i8_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load8_splat gv_i8
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i8, i8* @gv_i8
+  %e = load i8, ptr @gv_i8
   %v1 = insertelement <16 x i8> undef, i8 %e, i32 0
   %v2 = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %v2
 }
 
-define void @store_v16i8(<16 x i8> %v, <16 x i8>* %p) {
+define void @store_v16i8(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_v16i8:
 ; CHECK:         .functype store_v16i8 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -242,11 +242,11 @@ define void @store_v16i8(<16 x i8> %v, <16 x i8>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <16 x i8> %v , <16 x i8>* %p
+  store <16 x i8> %v , ptr %p
   ret void
 }
 
-define void @store_v16i8_with_folded_offset(<16 x i8> %v, <16 x i8>* %p) {
+define void @store_v16i8_with_folded_offset(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_v16i8_with_folded_offset:
 ; CHECK:         .functype store_v16i8_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -254,14 +254,14 @@ define void @store_v16i8_with_folded_offset(<16 x i8> %v, <16 x i8>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <16 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <16 x i8>*
-  store <16 x i8> %v , <16 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  store <16 x i8> %v , ptr %s
   ret void
 }
 
-define void @store_v16i8_with_folded_gep_offset(<16 x i8> %v, <16 x i8>* %p) {
+define void @store_v16i8_with_folded_gep_offset(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_v16i8_with_folded_gep_offset:
 ; CHECK:         .functype store_v16i8_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -269,12 +269,12 @@ define void @store_v16i8_with_folded_gep_offset(<16 x i8> %v, <16 x i8>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <16 x i8>, <16 x i8>* %p, i32 1
-  store <16 x i8> %v , <16 x i8>* %s
+  %s = getelementptr inbounds <16 x i8>, ptr %p, i32 1
+  store <16 x i8> %v , ptr %s
   ret void
 }
 
-define void @store_v16i8_with_unfolded_gep_negative_offset(<16 x i8> %v, <16 x i8>* %p) {
+define void @store_v16i8_with_unfolded_gep_negative_offset(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_v16i8_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_v16i8_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -284,12 +284,12 @@ define void @store_v16i8_with_unfolded_gep_negative_offset(<16 x i8> %v, <16 x i
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <16 x i8>, <16 x i8>* %p, i32 -1
-  store <16 x i8> %v , <16 x i8>* %s
+  %s = getelementptr inbounds <16 x i8>, ptr %p, i32 -1
+  store <16 x i8> %v , ptr %s
   ret void
 }
 
-define void @store_v16i8_with_unfolded_offset(<16 x i8> %v, <16 x i8>* %p) {
+define void @store_v16i8_with_unfolded_offset(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_v16i8_with_unfolded_offset:
 ; CHECK:         .functype store_v16i8_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -299,14 +299,14 @@ define void @store_v16i8_with_unfolded_offset(<16 x i8> %v, <16 x i8>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <16 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <16 x i8>*
-  store <16 x i8> %v , <16 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  store <16 x i8> %v , ptr %s
   ret void
 }
 
-define void @store_v16i8_with_unfolded_gep_offset(<16 x i8> %v, <16 x i8>* %p) {
+define void @store_v16i8_with_unfolded_gep_offset(<16 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_v16i8_with_unfolded_gep_offset:
 ; CHECK:         .functype store_v16i8_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -316,8 +316,8 @@ define void @store_v16i8_with_unfolded_gep_offset(<16 x i8> %v, <16 x i8>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <16 x i8>, <16 x i8>* %p, i32 1
-  store <16 x i8> %v , <16 x i8>* %s
+  %s = getelementptr <16 x i8>, ptr %p, i32 1
+  store <16 x i8> %v , ptr %s
   ret void
 }
 
@@ -329,8 +329,8 @@ define void @store_v16i8_to_numeric_address(<16 x i8> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <16 x i8>*
-  store <16 x i8> %v , <16 x i8>* %s
+  %s = inttoptr i32 32 to ptr
+  store <16 x i8> %v , ptr %s
   ret void
 }
 
@@ -342,211 +342,211 @@ define void @store_v16i8_to_global_address(<16 x i8> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store gv_v16i8
 ; CHECK-NEXT:    # fallthrough-return
-  store <16 x i8> %v , <16 x i8>* @gv_v16i8
+  store <16 x i8> %v , ptr @gv_v16i8
   ret void
 }
 
 ; ==============================================================================
 ; 8 x i16
 ; ==============================================================================
-define <8 x i16> @load_v8i16(<8 x i16>* %p) {
+define <8 x i16> @load_v8i16(ptr %p) {
 ; CHECK-LABEL: load_v8i16:
 ; CHECK:         .functype load_v8i16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i16>, <8 x i16>* %p
+  %v = load <8 x i16>, ptr %p
   ret <8 x i16> %v
 }
 
-define <8 x i16> @load_splat_v8i16(i16* %p) {
+define <8 x i16> @load_splat_v8i16(ptr %p) {
 ; CHECK-LABEL: load_splat_v8i16:
 ; CHECK:         .functype load_splat_v8i16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load16_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i16, i16* %p
+  %e = load i16, ptr %p
   %v1 = insertelement <8 x i16> undef, i16 %e, i32 0
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_sext_v8i16(<8 x i8>* %p) {
+define <8 x i16> @load_sext_v8i16(ptr %p) {
 ; CHECK-LABEL: load_sext_v8i16:
 ; CHECK:         .functype load_sext_v8i16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i16x8.load8x8_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p
+  %v = load <8 x i8>, ptr %p
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_zext_v8i16(<8 x i8>* %p) {
+define <8 x i16> @load_zext_v8i16(ptr %p) {
 ; CHECK-LABEL: load_zext_v8i16:
 ; CHECK:         .functype load_zext_v8i16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i16x8.load8x8_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p
+  %v = load <8 x i8>, ptr %p
   %v2 = zext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i8> @load_ext_v8i16(<8 x i8>* %p) {
+define <8 x i8> @load_ext_v8i16(ptr %p) {
 ; CHECK-LABEL: load_ext_v8i16:
 ; CHECK:         .functype load_ext_v8i16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* %p
+  %v = load <8 x i8>, ptr %p
   ret <8 x i8> %v
 }
 
-define <8 x i16> @load_v8i16_with_folded_offset(<8 x i16>* %p) {
+define <8 x i16> @load_v8i16_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_v8i16_with_folded_offset:
 ; CHECK:         .functype load_v8i16_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i16>*
-  %v = load <8 x i16>, <8 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <8 x i16>, ptr %s
   ret <8 x i16> %v
 }
 
-define <8 x i16> @load_splat_v8i16_with_folded_offset(i16* %p) {
+define <8 x i16> @load_splat_v8i16_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v8i16_with_folded_offset:
 ; CHECK:         .functype load_splat_v8i16_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load16_splat 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i16* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to i16*
-  %e = load i16, i16* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load i16, ptr %s
   %v1 = insertelement <8 x i16> undef, i16 %e, i32 0
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_sext_v8i16_with_folded_offset(<8 x i8>* %p) {
+define <8 x i16> @load_sext_v8i16_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v8i16_with_folded_offset:
 ; CHECK:         .functype load_sext_v8i16_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i16x8.load8x8_s 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i8>*
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <8 x i8>, ptr %s
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_zext_v8i16_with_folded_offset(<8 x i8>* %p) {
+define <8 x i16> @load_zext_v8i16_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v8i16_with_folded_offset:
 ; CHECK:         .functype load_zext_v8i16_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i16x8.load8x8_u 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i8>*
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <8 x i8>, ptr %s
   %v2 = zext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i8> @load_ext_v8i16_with_folded_offset(<8 x i8>* %p) {
+define <8 x i8> @load_ext_v8i16_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v8i16_with_folded_offset:
 ; CHECK:         .functype load_ext_v8i16_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i8>*
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <8 x i8>, ptr %s
   ret <8 x i8> %v
 }
 
-define <8 x i16> @load_v8i16_with_folded_gep_offset(<8 x i16>* %p) {
+define <8 x i16> @load_v8i16_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v8i16_with_folded_gep_offset:
 ; CHECK:         .functype load_v8i16_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i32 1
-  %v = load <8 x i16>, <8 x i16>* %s
+  %s = getelementptr inbounds <8 x i16>, ptr %p, i32 1
+  %v = load <8 x i16>, ptr %s
   ret <8 x i16> %v
 }
 
-define <8 x i16> @load_splat_v8i16_with_folded_gep_offset(i16* %p) {
+define <8 x i16> @load_splat_v8i16_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v8i16_with_folded_gep_offset:
 ; CHECK:         .functype load_splat_v8i16_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load16_splat 2
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i16, i16* %p, i32 1
-  %e = load i16, i16* %s
+  %s = getelementptr inbounds i16, ptr %p, i32 1
+  %e = load i16, ptr %s
   %v1 = insertelement <8 x i16> undef, i16 %e, i32 0
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_sext_v8i16_with_folded_gep_offset(<8 x i8>* %p) {
+define <8 x i16> @load_sext_v8i16_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v8i16_with_folded_gep_offset:
 ; CHECK:         .functype load_sext_v8i16_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i16x8.load8x8_s 8
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 1
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = getelementptr inbounds <8 x i8>, ptr %p, i32 1
+  %v = load <8 x i8>, ptr %s
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_zext_v8i16_with_folded_gep_offset(<8 x i8>* %p) {
+define <8 x i16> @load_zext_v8i16_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v8i16_with_folded_gep_offset:
 ; CHECK:         .functype load_zext_v8i16_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i16x8.load8x8_u 8
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 1
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = getelementptr inbounds <8 x i8>, ptr %p, i32 1
+  %v = load <8 x i8>, ptr %s
   %v2 = zext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i8> @load_ext_v8i16_with_folded_gep_offset(<8 x i8>* %p) {
+define <8 x i8> @load_ext_v8i16_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v8i16_with_folded_gep_offset:
 ; CHECK:         .functype load_ext_v8i16_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 8
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 1
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = getelementptr inbounds <8 x i8>, ptr %p, i32 1
+  %v = load <8 x i8>, ptr %s
   ret <8 x i8> %v
 }
 
-define <8 x i16> @load_v8i16_with_unfolded_gep_negative_offset(<8 x i16>* %p) {
+define <8 x i16> @load_v8i16_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_v8i16_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_v8i16_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -555,12 +555,12 @@ define <8 x i16> @load_v8i16_with_unfolded_gep_negative_offset(<8 x i16>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i32 -1
-  %v = load <8 x i16>, <8 x i16>* %s
+  %s = getelementptr inbounds <8 x i16>, ptr %p, i32 -1
+  %v = load <8 x i16>, ptr %s
   ret <8 x i16> %v
 }
 
-define <8 x i16> @load_splat_v8i16_with_unfolded_gep_negative_offset(i16* %p) {
+define <8 x i16> @load_splat_v8i16_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v8i16_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_splat_v8i16_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -569,14 +569,14 @@ define <8 x i16> @load_splat_v8i16_with_unfolded_gep_negative_offset(i16* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load16_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i16, i16* %p, i32 -1
-  %e = load i16, i16* %s
+  %s = getelementptr inbounds i16, ptr %p, i32 -1
+  %e = load i16, ptr %s
   %v1 = insertelement <8 x i16> undef, i16 %e, i32 0
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_sext_v8i16_with_unfolded_gep_negative_offset(<8 x i8>* %p) {
+define <8 x i16> @load_sext_v8i16_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v8i16_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_sext_v8i16_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -585,13 +585,13 @@ define <8 x i16> @load_sext_v8i16_with_unfolded_gep_negative_offset(<8 x i8>* %p
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i16x8.load8x8_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 -1
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = getelementptr inbounds <8 x i8>, ptr %p, i32 -1
+  %v = load <8 x i8>, ptr %s
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_zext_v8i16_with_unfolded_gep_negative_offset(<8 x i8>* %p) {
+define <8 x i16> @load_zext_v8i16_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v8i16_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_zext_v8i16_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -600,13 +600,13 @@ define <8 x i16> @load_zext_v8i16_with_unfolded_gep_negative_offset(<8 x i8>* %p
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i16x8.load8x8_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 -1
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = getelementptr inbounds <8 x i8>, ptr %p, i32 -1
+  %v = load <8 x i8>, ptr %s
   %v2 = zext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i8> @load_ext_v8i16_with_unfolded_gep_negative_offset(<8 x i8>* %p) {
+define <8 x i8> @load_ext_v8i16_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v8i16_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_ext_v8i16_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -615,12 +615,12 @@ define <8 x i8> @load_ext_v8i16_with_unfolded_gep_negative_offset(<8 x i8>* %p)
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 -1
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = getelementptr inbounds <8 x i8>, ptr %p, i32 -1
+  %v = load <8 x i8>, ptr %s
   ret <8 x i8> %v
 }
 
-define <8 x i16> @load_v8i16_with_unfolded_offset(<8 x i16>* %p) {
+define <8 x i16> @load_v8i16_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_v8i16_with_unfolded_offset:
 ; CHECK:         .functype load_v8i16_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -629,14 +629,14 @@ define <8 x i16> @load_v8i16_with_unfolded_offset(<8 x i16>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i16>*
-  %v = load <8 x i16>, <8 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <8 x i16>, ptr %s
   ret <8 x i16> %v
 }
 
-define <8 x i16> @load_splat_v8i16_with_unfolded_offset(i16* %p) {
+define <8 x i16> @load_splat_v8i16_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v8i16_with_unfolded_offset:
 ; CHECK:         .functype load_splat_v8i16_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -645,16 +645,16 @@ define <8 x i16> @load_splat_v8i16_with_unfolded_offset(i16* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load16_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i16* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to i16*
-  %e = load i16, i16* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load i16, ptr %s
   %v1 = insertelement <8 x i16> undef, i16 %e, i32 0
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_sext_v8i16_with_unfolded_offset(<8 x i8>* %p) {
+define <8 x i16> @load_sext_v8i16_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v8i16_with_unfolded_offset:
 ; CHECK:         .functype load_sext_v8i16_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -663,15 +663,15 @@ define <8 x i16> @load_sext_v8i16_with_unfolded_offset(<8 x i8>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i16x8.load8x8_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i8>*
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <8 x i8>, ptr %s
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_zext_v8i16_with_unfolded_offset(<8 x i8>* %p) {
+define <8 x i16> @load_zext_v8i16_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v8i16_with_unfolded_offset:
 ; CHECK:         .functype load_zext_v8i16_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -680,15 +680,15 @@ define <8 x i16> @load_zext_v8i16_with_unfolded_offset(<8 x i8>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i16x8.load8x8_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i8>*
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <8 x i8>, ptr %s
   %v2 = zext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i8> @load_ext_v8i16_with_unfolded_offset(<8 x i8>* %p) {
+define <8 x i8> @load_ext_v8i16_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v8i16_with_unfolded_offset:
 ; CHECK:         .functype load_ext_v8i16_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -697,14 +697,14 @@ define <8 x i8> @load_ext_v8i16_with_unfolded_offset(<8 x i8>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i8>*
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <8 x i8>, ptr %s
   ret <8 x i8> %v
 }
 
-define <8 x i16> @load_v8i16_with_unfolded_gep_offset(<8 x i16>* %p) {
+define <8 x i16> @load_v8i16_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v8i16_with_unfolded_gep_offset:
 ; CHECK:         .functype load_v8i16_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -713,12 +713,12 @@ define <8 x i16> @load_v8i16_with_unfolded_gep_offset(<8 x i16>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <8 x i16>, <8 x i16>* %p, i32 1
-  %v = load <8 x i16>, <8 x i16>* %s
+  %s = getelementptr <8 x i16>, ptr %p, i32 1
+  %v = load <8 x i16>, ptr %s
   ret <8 x i16> %v
 }
 
-define <8 x i16> @load_splat_v8i16_with_unfolded_gep_offset(i16* %p) {
+define <8 x i16> @load_splat_v8i16_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v8i16_with_unfolded_gep_offset:
 ; CHECK:         .functype load_splat_v8i16_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -727,14 +727,14 @@ define <8 x i16> @load_splat_v8i16_with_unfolded_gep_offset(i16* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load16_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i16, i16* %p, i32 1
-  %e = load i16, i16* %s
+  %s = getelementptr i16, ptr %p, i32 1
+  %e = load i16, ptr %s
   %v1 = insertelement <8 x i16> undef, i16 %e, i32 0
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_sext_v8i16_with_unfolded_gep_offset(<8 x i8>* %p) {
+define <8 x i16> @load_sext_v8i16_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v8i16_with_unfolded_gep_offset:
 ; CHECK:         .functype load_sext_v8i16_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -743,13 +743,13 @@ define <8 x i16> @load_sext_v8i16_with_unfolded_gep_offset(<8 x i8>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i16x8.load8x8_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <8 x i8>, <8 x i8>* %p, i32 1
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = getelementptr <8 x i8>, ptr %p, i32 1
+  %v = load <8 x i8>, ptr %s
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i16> @load_zext_v8i16_with_unfolded_gep_offset(<8 x i8>* %p) {
+define <8 x i16> @load_zext_v8i16_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v8i16_with_unfolded_gep_offset:
 ; CHECK:         .functype load_zext_v8i16_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -758,13 +758,13 @@ define <8 x i16> @load_zext_v8i16_with_unfolded_gep_offset(<8 x i8>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i16x8.load8x8_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <8 x i8>, <8 x i8>* %p, i32 1
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = getelementptr <8 x i8>, ptr %p, i32 1
+  %v = load <8 x i8>, ptr %s
   %v2 = zext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
 
-define <8 x i8> @load_ext_v8i16_with_unfolded_gep_offset(<8 x i8>* %p) {
+define <8 x i8> @load_ext_v8i16_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v8i16_with_unfolded_gep_offset:
 ; CHECK:         .functype load_ext_v8i16_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -773,8 +773,8 @@ define <8 x i8> @load_ext_v8i16_with_unfolded_gep_offset(<8 x i8>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <8 x i8>, <8 x i8>* %p, i32 1
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = getelementptr <8 x i8>, ptr %p, i32 1
+  %v = load <8 x i8>, ptr %s
   ret <8 x i8> %v
 }
 
@@ -785,8 +785,8 @@ define <8 x i16> @load_v8i16_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <8 x i16>*
-  %v = load <8 x i16>, <8 x i16>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <8 x i16>, ptr %s
   ret <8 x i16> %v
 }
 
@@ -797,8 +797,8 @@ define <8 x i16> @load_splat_v8i16_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load16_splat 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to i16*
-  %e = load i16, i16* %s
+  %s = inttoptr i32 32 to ptr
+  %e = load i16, ptr %s
   %v1 = insertelement <8 x i16> undef, i16 %e, i32 0
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %v2
@@ -811,8 +811,8 @@ define <8 x i16> @load_sext_v8i16_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i16x8.load8x8_s 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <8 x i8>*
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <8 x i8>, ptr %s
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
@@ -824,8 +824,8 @@ define <8 x i16> @load_zext_v8i16_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i16x8.load8x8_u 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <8 x i8>*
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <8 x i8>, ptr %s
   %v2 = zext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
@@ -837,8 +837,8 @@ define <8 x i8> @load_ext_v8i16_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_zero 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <8 x i8>*
-  %v = load <8 x i8>, <8 x i8>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <8 x i8>, ptr %s
   ret <8 x i8> %v
 }
 
@@ -850,7 +850,7 @@ define <8 x i16> @load_v8i16_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load gv_v8i16
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i16>, <8 x i16>* @gv_v8i16
+  %v = load <8 x i16>, ptr @gv_v8i16
   ret <8 x i16> %v
 }
 
@@ -862,7 +862,7 @@ define <8 x i16> @load_splat_v8i16_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load16_splat gv_i16
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i16, i16* @gv_i16
+  %e = load i16, ptr @gv_i16
   %v1 = insertelement <8 x i16> undef, i16 %e, i32 0
   %v2 = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %v2
@@ -876,7 +876,7 @@ define <8 x i16> @load_sext_v8i16_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i16x8.load8x8_s gv_v8i8
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* @gv_v8i8
+  %v = load <8 x i8>, ptr @gv_v8i8
   %v2 = sext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
@@ -888,7 +888,7 @@ define <8 x i16> @load_zext_v8i16_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i16x8.load8x8_u gv_v8i8
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* @gv_v8i8
+  %v = load <8 x i8>, ptr @gv_v8i8
   %v2 = zext <8 x i8> %v to <8 x i16>
   ret <8 x i16> %v2
 }
@@ -900,12 +900,12 @@ define <8 x i8> @load_ext_v8i16_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_zero gv_v8i8
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <8 x i8>, <8 x i8>* @gv_v8i8
+  %v = load <8 x i8>, ptr @gv_v8i8
   ret <8 x i8> %v
 }
 
 
-define void @store_v8i16(<8 x i16> %v, <8 x i16>* %p) {
+define void @store_v8i16(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_v8i16:
 ; CHECK:         .functype store_v8i16 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -913,11 +913,11 @@ define void @store_v8i16(<8 x i16> %v, <8 x i16>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <8 x i16> %v , <8 x i16>* %p
+  store <8 x i16> %v , ptr %p
   ret void
 }
 
-define void @store_narrowing_v8i16(<8 x i8> %v, <8 x i8>* %p) {
+define void @store_narrowing_v8i16(<8 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v8i16:
 ; CHECK:         .functype store_narrowing_v8i16 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -925,11 +925,11 @@ define void @store_narrowing_v8i16(<8 x i8> %v, <8 x i8>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <8 x i8> %v, <8 x i8>* %p
+  store <8 x i8> %v, ptr %p
   ret void
 }
 
-define void @store_v8i16_with_folded_offset(<8 x i16> %v, <8 x i16>* %p) {
+define void @store_v8i16_with_folded_offset(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_v8i16_with_folded_offset:
 ; CHECK:         .functype store_v8i16_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -937,14 +937,14 @@ define void @store_v8i16_with_folded_offset(<8 x i16> %v, <8 x i16>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i16>*
-  store <8 x i16> %v , <8 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  store <8 x i16> %v , ptr %s
   ret void
 }
 
-define void @store_narrowing_v8i16_with_folded_offset(<8 x i8> %v, <8 x i8>* %p) {
+define void @store_narrowing_v8i16_with_folded_offset(<8 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v8i16_with_folded_offset:
 ; CHECK:         .functype store_narrowing_v8i16_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -952,14 +952,14 @@ define void @store_narrowing_v8i16_with_folded_offset(<8 x i8> %v, <8 x i8>* %p)
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 16, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i8>*
-  store <8 x i8> %v , <8 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  store <8 x i8> %v , ptr %s
   ret void
 }
 
-define void @store_v8i16_with_folded_gep_offset(<8 x i16> %v, <8 x i16>* %p) {
+define void @store_v8i16_with_folded_gep_offset(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_v8i16_with_folded_gep_offset:
 ; CHECK:         .functype store_v8i16_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -967,12 +967,12 @@ define void @store_v8i16_with_folded_gep_offset(<8 x i16> %v, <8 x i16>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i32 1
-  store <8 x i16> %v , <8 x i16>* %s
+  %s = getelementptr inbounds <8 x i16>, ptr %p, i32 1
+  store <8 x i16> %v , ptr %s
   ret void
 }
 
-define void @store_narrowing_v8i16_with_folded_gep_offset(<8 x i8> %v, <8 x i8>* %p) {
+define void @store_narrowing_v8i16_with_folded_gep_offset(<8 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v8i16_with_folded_gep_offset:
 ; CHECK:         .functype store_narrowing_v8i16_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -980,12 +980,12 @@ define void @store_narrowing_v8i16_with_folded_gep_offset(<8 x i8> %v, <8 x i8>*
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 8, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 1
-  store <8 x i8> %v , <8 x i8>* %s
+  %s = getelementptr inbounds <8 x i8>, ptr %p, i32 1
+  store <8 x i8> %v , ptr %s
   ret void
 }
 
-define void @store_v8i16_with_unfolded_gep_negative_offset(<8 x i16> %v, <8 x i16>* %p) {
+define void @store_v8i16_with_unfolded_gep_negative_offset(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_v8i16_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_v8i16_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -995,12 +995,12 @@ define void @store_v8i16_with_unfolded_gep_negative_offset(<8 x i16> %v, <8 x i1
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i32 -1
-  store <8 x i16> %v , <8 x i16>* %s
+  %s = getelementptr inbounds <8 x i16>, ptr %p, i32 -1
+  store <8 x i16> %v , ptr %s
   ret void
 }
 
-define void @store_narrowing_v8i16_with_unfolded_gep_negative_offset(<8 x i8> %v, <8 x i8>* %p) {
+define void @store_narrowing_v8i16_with_unfolded_gep_negative_offset(<8 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v8i16_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_narrowing_v8i16_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1010,12 +1010,12 @@ define void @store_narrowing_v8i16_with_unfolded_gep_negative_offset(<8 x i8> %v
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 -1
-  store <8 x i8> %v , <8 x i8>* %s
+  %s = getelementptr inbounds <8 x i8>, ptr %p, i32 -1
+  store <8 x i8> %v , ptr %s
   ret void
 }
 
-define void @store_v8i16_with_unfolded_offset(<8 x i16> %v, <8 x i16>* %p) {
+define void @store_v8i16_with_unfolded_offset(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_v8i16_with_unfolded_offset:
 ; CHECK:         .functype store_v8i16_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1025,14 +1025,14 @@ define void @store_v8i16_with_unfolded_offset(<8 x i16> %v, <8 x i16>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i16>*
-  store <8 x i16> %v , <8 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  store <8 x i16> %v , ptr %s
   ret void
 }
 
-define void @store_narrowing_v8i16_with_unfolded_offset(<8 x i8> %v, <8 x i8>* %p) {
+define void @store_narrowing_v8i16_with_unfolded_offset(<8 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v8i16_with_unfolded_offset:
 ; CHECK:         .functype store_narrowing_v8i16_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1042,14 +1042,14 @@ define void @store_narrowing_v8i16_with_unfolded_offset(<8 x i8> %v, <8 x i8>* %
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <8 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <8 x i8>*
-  store <8 x i8> %v , <8 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  store <8 x i8> %v , ptr %s
   ret void
 }
 
-define void @store_v8i16_with_unfolded_gep_offset(<8 x i16> %v, <8 x i16>* %p) {
+define void @store_v8i16_with_unfolded_gep_offset(<8 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_v8i16_with_unfolded_gep_offset:
 ; CHECK:         .functype store_v8i16_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1059,12 +1059,12 @@ define void @store_v8i16_with_unfolded_gep_offset(<8 x i16> %v, <8 x i16>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <8 x i16>, <8 x i16>* %p, i32 1
-  store <8 x i16> %v , <8 x i16>* %s
+  %s = getelementptr <8 x i16>, ptr %p, i32 1
+  store <8 x i16> %v , ptr %s
   ret void
 }
 
-define void @store_narrowing_v8i16_with_unfolded_gep_offset(<8 x i8> %v, <8 x i8>* %p) {
+define void @store_narrowing_v8i16_with_unfolded_gep_offset(<8 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v8i16_with_unfolded_gep_offset:
 ; CHECK:         .functype store_narrowing_v8i16_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1074,8 +1074,8 @@ define void @store_narrowing_v8i16_with_unfolded_gep_offset(<8 x i8> %v, <8 x i8
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <8 x i8>, <8 x i8>* %p, i32 1
-  store <8 x i8> %v , <8 x i8>* %s
+  %s = getelementptr <8 x i8>, ptr %p, i32 1
+  store <8 x i8> %v , ptr %s
   ret void
 }
 
@@ -1087,12 +1087,12 @@ define void @store_v8i16_to_numeric_address(<8 x i16> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <8 x i16>*
-  store <8 x i16> %v , <8 x i16>* %s
+  %s = inttoptr i32 32 to ptr
+  store <8 x i16> %v , ptr %s
   ret void
 }
 
-define void @store_narrowing_v8i16_to_numeric_address(<8 x i8> %v, <8 x i8>* %p) {
+define void @store_narrowing_v8i16_to_numeric_address(<8 x i8> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v8i16_to_numeric_address:
 ; CHECK:         .functype store_narrowing_v8i16_to_numeric_address (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1100,8 +1100,8 @@ define void @store_narrowing_v8i16_to_numeric_address(<8 x i8> %v, <8 x i8>* %p)
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 32, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <8 x i8>*
-  store <8 x i8> %v , <8 x i8>* %s
+  %s = inttoptr i32 32 to ptr
+  store <8 x i8> %v , ptr %s
   ret void
 }
 
@@ -1113,7 +1113,7 @@ define void @store_v8i16_to_global_address(<8 x i16> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store gv_v8i16
 ; CHECK-NEXT:    # fallthrough-return
-  store <8 x i16> %v , <8 x i16>* @gv_v8i16
+  store <8 x i16> %v , ptr @gv_v8i16
   ret void
 }
 
@@ -1125,62 +1125,62 @@ define void @store_narrowing_v8i16_to_global_address(<8 x i8> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane gv_v8i8, 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <8 x i8> %v , <8 x i8>* @gv_v8i8
+  store <8 x i8> %v , ptr @gv_v8i8
   ret void
 }
 
 ; ==============================================================================
 ; 4 x i32
 ; ==============================================================================
-define <4 x i32> @load_v4i32(<4 x i32>* %p) {
+define <4 x i32> @load_v4i32(ptr %p) {
 ; CHECK-LABEL: load_v4i32:
 ; CHECK:         .functype load_v4i32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i32>, <4 x i32>* %p
+  %v = load <4 x i32>, ptr %p
   ret <4 x i32> %v
 }
 
-define <4 x i32> @load_splat_v4i32(i32* %addr) {
+define <4 x i32> @load_splat_v4i32(ptr %addr) {
 ; CHECK-LABEL: load_splat_v4i32:
 ; CHECK:         .functype load_splat_v4i32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i32, i32* %addr, align 4
+  %e = load i32, ptr %addr, align 4
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i16_to_v4i32(<4 x i16>* %p) {
+define <4 x i32> @load_sext_v4i16_to_v4i32(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i16_to_v4i32:
 ; CHECK:         .functype load_sext_v4i16_to_v4i32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32x4.load16x4_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p
+  %v = load <4 x i16>, ptr %p
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i16_to_v4i32(<4 x i16>* %p) {
+define <4 x i32> @load_zext_v4i16_to_v4i32(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i16_to_v4i32:
 ; CHECK:         .functype load_zext_v4i16_to_v4i32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32x4.load16x4_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p
+  %v = load <4 x i16>, ptr %p
   %v2 = zext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i8_to_v4i32(<4 x i8>* %p) {
+define <4 x i32> @load_sext_v4i8_to_v4i32(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i8_to_v4i32:
 ; CHECK:         .functype load_sext_v4i8_to_v4i32 (i32) -> (v128)
 ; CHECK-NEXT:    .local v128
@@ -1194,12 +1194,12 @@ define <4 x i32> @load_sext_v4i8_to_v4i32(<4 x i8>* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32x4.shr_s
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i8>, <4 x i8>* %p
+  %v = load <4 x i8>, ptr %p
   %v2 = sext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i8_to_v4i32(<4 x i8>* %p) {
+define <4 x i32> @load_zext_v4i8_to_v4i32(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i8_to_v4i32:
 ; CHECK:         .functype load_zext_v4i8_to_v4i32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1208,83 +1208,83 @@ define <4 x i32> @load_zext_v4i8_to_v4i32(<4 x i8>* %p) {
 ; CHECK-NEXT:    v128.load32_zero 0
 ; CHECK-NEXT:    i8x16.shuffle 16, 1, 2, 3, 17, 5, 6, 7, 18, 9, 10, 11, 19, 13, 14, 15
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i8>, <4 x i8>* %p
+  %v = load <4 x i8>, ptr %p
   %v2 = zext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i16> @load_ext_v4i32(<4 x i16>* %p) {
+define <4 x i16> @load_ext_v4i32(ptr %p) {
 ; CHECK-LABEL: load_ext_v4i32:
 ; CHECK:         .functype load_ext_v4i32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* %p
+  %v = load <4 x i16>, ptr %p
   ret <4 x i16> %v
 }
 
-define <4 x i32> @load_v4i32_with_folded_offset(<4 x i32>* %p) {
+define <4 x i32> @load_v4i32_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_v4i32_with_folded_offset:
 ; CHECK:         .functype load_v4i32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i32>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i32>*
-  %v = load <4 x i32>, <4 x i32>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i32>, ptr %s
   ret <4 x i32> %v
 }
 
-define <4 x i32> @load_splat_v4i32_with_folded_offset(i32* %p) {
+define <4 x i32> @load_splat_v4i32_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v4i32_with_folded_offset:
 ; CHECK:         .functype load_splat_v4i32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_splat 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i32* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to i32*
-  %e = load i32, i32* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load i32, ptr %s
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i16_to_v4i32_with_folded_offset(<4 x i16>* %p) {
+define <4 x i32> @load_sext_v4i16_to_v4i32_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i16_to_v4i32_with_folded_offset:
 ; CHECK:         .functype load_sext_v4i16_to_v4i32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32x4.load16x4_s 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i16>*
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i16>, ptr %s
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i16_to_v4i32_with_folded_offset(<4 x i16>* %p) {
+define <4 x i32> @load_zext_v4i16_to_v4i32_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i16_to_v4i32_with_folded_offset:
 ; CHECK:         .functype load_zext_v4i16_to_v4i32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32x4.load16x4_u 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i16>*
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i16>, ptr %s
   %v2 = zext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i8_to_v4i32_with_folded_offset(<4 x i8>* %p) {
+define <4 x i32> @load_sext_v4i8_to_v4i32_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i8_to_v4i32_with_folded_offset:
 ; CHECK:         .functype load_sext_v4i8_to_v4i32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:    .local v128
@@ -1298,15 +1298,15 @@ define <4 x i32> @load_sext_v4i8_to_v4i32_with_folded_offset(<4 x i8>* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32x4.shr_s
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i8>*
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i8>, ptr %s
   %v2 = sext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i8_to_v4i32_with_folded_offset(<4 x i8>* %p) {
+define <4 x i32> @load_zext_v4i8_to_v4i32_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i8_to_v4i32_with_folded_offset:
 ; CHECK:         .functype load_zext_v4i8_to_v4i32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1315,81 +1315,81 @@ define <4 x i32> @load_zext_v4i8_to_v4i32_with_folded_offset(<4 x i8>* %p) {
 ; CHECK-NEXT:    v128.load32_zero 16
 ; CHECK-NEXT:    i8x16.shuffle 16, 1, 2, 3, 17, 5, 6, 7, 18, 9, 10, 11, 19, 13, 14, 15
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i8>*
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i8>, ptr %s
   %v2 = zext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i16> @load_ext_v4i32_with_folded_offset(<4 x i16>* %p) {
+define <4 x i16> @load_ext_v4i32_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v4i32_with_folded_offset:
 ; CHECK:         .functype load_ext_v4i32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i16>*
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i16>, ptr %s
   ret <4 x i16> %v
 }
 
-define <4 x i32> @load_v4i32_with_folded_gep_offset(<4 x i32>* %p) {
+define <4 x i32> @load_v4i32_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v4i32_with_folded_gep_offset:
 ; CHECK:         .functype load_v4i32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 1
-  %v = load <4 x i32>, <4 x i32>* %s
+  %s = getelementptr inbounds <4 x i32>, ptr %p, i32 1
+  %v = load <4 x i32>, ptr %s
   ret <4 x i32> %v
 }
 
-define <4 x i32> @load_splat_v4i32_with_folded_gep_offset(i32* %p) {
+define <4 x i32> @load_splat_v4i32_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v4i32_with_folded_gep_offset:
 ; CHECK:         .functype load_splat_v4i32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_splat 4
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i32, i32* %p, i32 1
-  %e = load i32, i32* %s
+  %s = getelementptr inbounds i32, ptr %p, i32 1
+  %e = load i32, ptr %s
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i16_to_v4i32_with_folded_gep_offset(<4 x i16>* %p) {
+define <4 x i32> @load_sext_v4i16_to_v4i32_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i16_to_v4i32_with_folded_gep_offset:
 ; CHECK:         .functype load_sext_v4i16_to_v4i32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32x4.load16x4_s 8
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 1
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = getelementptr inbounds <4 x i16>, ptr %p, i32 1
+  %v = load <4 x i16>, ptr %s
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i16_to_v4i32_with_folded_gep_offset(<4 x i16>* %p) {
+define <4 x i32> @load_zext_v4i16_to_v4i32_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i16_to_v4i32_with_folded_gep_offset:
 ; CHECK:         .functype load_zext_v4i16_to_v4i32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32x4.load16x4_u 8
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 1
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = getelementptr inbounds <4 x i16>, ptr %p, i32 1
+  %v = load <4 x i16>, ptr %s
   %v2 = zext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i8_to_v4i32_with_folded_gep_offset(<4 x i8>* %p) {
+define <4 x i32> @load_sext_v4i8_to_v4i32_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i8_to_v4i32_with_folded_gep_offset:
 ; CHECK:         .functype load_sext_v4i8_to_v4i32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:    .local v128
@@ -1403,13 +1403,13 @@ define <4 x i32> @load_sext_v4i8_to_v4i32_with_folded_gep_offset(<4 x i8>* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32x4.shr_s
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i8>, <4 x i8>* %p, i32 1
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = getelementptr inbounds <4 x i8>, ptr %p, i32 1
+  %v = load <4 x i8>, ptr %s
   %v2 = sext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i8_to_v4i32_with_folded_gep_offset(<4 x i8>* %p) {
+define <4 x i32> @load_zext_v4i8_to_v4i32_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i8_to_v4i32_with_folded_gep_offset:
 ; CHECK:         .functype load_zext_v4i8_to_v4i32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1418,25 +1418,25 @@ define <4 x i32> @load_zext_v4i8_to_v4i32_with_folded_gep_offset(<4 x i8>* %p) {
 ; CHECK-NEXT:    v128.load32_zero 4
 ; CHECK-NEXT:    i8x16.shuffle 16, 1, 2, 3, 17, 5, 6, 7, 18, 9, 10, 11, 19, 13, 14, 15
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i8>, <4 x i8>* %p, i32 1
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = getelementptr inbounds <4 x i8>, ptr %p, i32 1
+  %v = load <4 x i8>, ptr %s
   %v2 = zext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i16> @load_ext_v4i32_with_folded_gep_offset(<4 x i16>* %p) {
+define <4 x i16> @load_ext_v4i32_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v4i32_with_folded_gep_offset:
 ; CHECK:         .functype load_ext_v4i32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 8
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 1
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = getelementptr inbounds <4 x i16>, ptr %p, i32 1
+  %v = load <4 x i16>, ptr %s
   ret <4 x i16> %v
 }
 
-define <4 x i32> @load_v4i32_with_unfolded_gep_negative_offset(<4 x i32>* %p) {
+define <4 x i32> @load_v4i32_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_v4i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_v4i32_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1445,12 +1445,12 @@ define <4 x i32> @load_v4i32_with_unfolded_gep_negative_offset(<4 x i32>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 -1
-  %v = load <4 x i32>, <4 x i32>* %s
+  %s = getelementptr inbounds <4 x i32>, ptr %p, i32 -1
+  %v = load <4 x i32>, ptr %s
   ret <4 x i32> %v
 }
 
-define <4 x i32> @load_splat_v4i32_with_unfolded_gep_negative_offset(i32* %p) {
+define <4 x i32> @load_splat_v4i32_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v4i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_splat_v4i32_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1459,14 +1459,14 @@ define <4 x i32> @load_splat_v4i32_with_unfolded_gep_negative_offset(i32* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load32_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i32, i32* %p, i32 -1
-  %e = load i32, i32* %s
+  %s = getelementptr inbounds i32, ptr %p, i32 -1
+  %e = load i32, ptr %s
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i16_to_v4i32_with_unfolded_gep_negative_offset(<4 x i16>* %p) {
+define <4 x i32> @load_sext_v4i16_to_v4i32_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i16_to_v4i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_sext_v4i16_to_v4i32_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1475,13 +1475,13 @@ define <4 x i32> @load_sext_v4i16_to_v4i32_with_unfolded_gep_negative_offset(<4
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i32x4.load16x4_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 -1
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = getelementptr inbounds <4 x i16>, ptr %p, i32 -1
+  %v = load <4 x i16>, ptr %s
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i16_to_v4i32_with_unfolded_gep_negative_offset(<4 x i16>* %p) {
+define <4 x i32> @load_zext_v4i16_to_v4i32_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i16_to_v4i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_zext_v4i16_to_v4i32_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1490,13 +1490,13 @@ define <4 x i32> @load_zext_v4i16_to_v4i32_with_unfolded_gep_negative_offset(<4
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i32x4.load16x4_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 -1
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = getelementptr inbounds <4 x i16>, ptr %p, i32 -1
+  %v = load <4 x i16>, ptr %s
   %v2 = zext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i8_to_v4i32_with_unfolded_gep_negative_offset(<4 x i8>* %p) {
+define <4 x i32> @load_sext_v4i8_to_v4i32_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i8_to_v4i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_sext_v4i8_to_v4i32_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:    .local v128
@@ -1512,13 +1512,13 @@ define <4 x i32> @load_sext_v4i8_to_v4i32_with_unfolded_gep_negative_offset(<4 x
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32x4.shr_s
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i8>, <4 x i8>* %p, i32 -1
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = getelementptr inbounds <4 x i8>, ptr %p, i32 -1
+  %v = load <4 x i8>, ptr %s
   %v2 = sext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i8_to_v4i32_with_unfolded_gep_negative_offset(<4 x i8>* %p) {
+define <4 x i32> @load_zext_v4i8_to_v4i32_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i8_to_v4i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_zext_v4i8_to_v4i32_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1529,13 +1529,13 @@ define <4 x i32> @load_zext_v4i8_to_v4i32_with_unfolded_gep_negative_offset(<4 x
 ; CHECK-NEXT:    v128.load32_zero 0
 ; CHECK-NEXT:    i8x16.shuffle 16, 1, 2, 3, 17, 5, 6, 7, 18, 9, 10, 11, 19, 13, 14, 15
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i8>, <4 x i8>* %p, i32 -1
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = getelementptr inbounds <4 x i8>, ptr %p, i32 -1
+  %v = load <4 x i8>, ptr %s
   %v2 = zext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i16> @load_ext_v4i32_with_unfolded_gep_negative_offset(<4 x i16>* %p) {
+define <4 x i16> @load_ext_v4i32_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v4i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_ext_v4i32_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1544,12 +1544,12 @@ define <4 x i16> @load_ext_v4i32_with_unfolded_gep_negative_offset(<4 x i16>* %p
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 -1
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = getelementptr inbounds <4 x i16>, ptr %p, i32 -1
+  %v = load <4 x i16>, ptr %s
   ret <4 x i16> %v
 }
 
-define <4 x i32> @load_v4i32_with_unfolded_offset(<4 x i32>* %p) {
+define <4 x i32> @load_v4i32_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_v4i32_with_unfolded_offset:
 ; CHECK:         .functype load_v4i32_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1558,14 +1558,14 @@ define <4 x i32> @load_v4i32_with_unfolded_offset(<4 x i32>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i32>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i32>*
-  %v = load <4 x i32>, <4 x i32>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i32>, ptr %s
   ret <4 x i32> %v
 }
 
-define <4 x i32> @load_splat_v4i32_with_unfolded_offset(i32* %p) {
+define <4 x i32> @load_splat_v4i32_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v4i32_with_unfolded_offset:
 ; CHECK:         .functype load_splat_v4i32_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1574,16 +1574,16 @@ define <4 x i32> @load_splat_v4i32_with_unfolded_offset(i32* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load32_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i32* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to i32*
-  %e = load i32, i32* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load i32, ptr %s
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i16_to_v4i32_with_unfolded_offset(<4 x i16>* %p) {
+define <4 x i32> @load_sext_v4i16_to_v4i32_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i16_to_v4i32_with_unfolded_offset:
 ; CHECK:         .functype load_sext_v4i16_to_v4i32_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1592,15 +1592,15 @@ define <4 x i32> @load_sext_v4i16_to_v4i32_with_unfolded_offset(<4 x i16>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i32x4.load16x4_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i16>*
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i16>, ptr %s
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i16_to_v4i32_with_unfolded_offset(<4 x i16>* %p) {
+define <4 x i32> @load_zext_v4i16_to_v4i32_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i16_to_v4i32_with_unfolded_offset:
 ; CHECK:         .functype load_zext_v4i16_to_v4i32_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1609,15 +1609,15 @@ define <4 x i32> @load_zext_v4i16_to_v4i32_with_unfolded_offset(<4 x i16>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i32x4.load16x4_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i16>*
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i16>, ptr %s
   %v2 = zext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i8_to_v4i32_with_unfolded_offset(<4 x i8>* %p) {
+define <4 x i32> @load_sext_v4i8_to_v4i32_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i8_to_v4i32_with_unfolded_offset:
 ; CHECK:         .functype load_sext_v4i8_to_v4i32_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:    .local v128
@@ -1633,15 +1633,15 @@ define <4 x i32> @load_sext_v4i8_to_v4i32_with_unfolded_offset(<4 x i8>* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32x4.shr_s
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i8>*
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i8>, ptr %s
   %v2 = sext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i8_to_v4i32_with_unfolded_offset(<4 x i8>* %p) {
+define <4 x i32> @load_zext_v4i8_to_v4i32_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i8_to_v4i32_with_unfolded_offset:
 ; CHECK:         .functype load_zext_v4i8_to_v4i32_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1652,15 +1652,15 @@ define <4 x i32> @load_zext_v4i8_to_v4i32_with_unfolded_offset(<4 x i8>* %p) {
 ; CHECK-NEXT:    v128.load32_zero 0
 ; CHECK-NEXT:    i8x16.shuffle 16, 1, 2, 3, 17, 5, 6, 7, 18, 9, 10, 11, 19, 13, 14, 15
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i8>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i8>*
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i8>, ptr %s
   %v2 = zext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i16> @load_ext_v4i32_with_unfolded_offset(<4 x i16>* %p) {
+define <4 x i16> @load_ext_v4i32_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v4i32_with_unfolded_offset:
 ; CHECK:         .functype load_ext_v4i32_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1669,14 +1669,14 @@ define <4 x i16> @load_ext_v4i32_with_unfolded_offset(<4 x i16>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i16>*
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x i16>, ptr %s
   ret <4 x i16> %v
 }
 
-define <4 x i32> @load_v4i32_with_unfolded_gep_offset(<4 x i32>* %p) {
+define <4 x i32> @load_v4i32_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v4i32_with_unfolded_gep_offset:
 ; CHECK:         .functype load_v4i32_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1685,12 +1685,12 @@ define <4 x i32> @load_v4i32_with_unfolded_gep_offset(<4 x i32>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <4 x i32>, <4 x i32>* %p, i32 1
-  %v = load <4 x i32>, <4 x i32>* %s
+  %s = getelementptr <4 x i32>, ptr %p, i32 1
+  %v = load <4 x i32>, ptr %s
   ret <4 x i32> %v
 }
 
-define <4 x i32> @load_splat_v4i32_with_unfolded_gep_offset(i32* %p) {
+define <4 x i32> @load_splat_v4i32_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v4i32_with_unfolded_gep_offset:
 ; CHECK:         .functype load_splat_v4i32_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1699,14 +1699,14 @@ define <4 x i32> @load_splat_v4i32_with_unfolded_gep_offset(i32* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load32_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i32, i32* %p, i32 1
-  %e = load i32, i32* %s
+  %s = getelementptr i32, ptr %p, i32 1
+  %e = load i32, ptr %s
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i16_to_v4i32_with_unfolded_gep_offset(<4 x i16>* %p) {
+define <4 x i32> @load_sext_v4i16_to_v4i32_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i16_to_v4i32_with_unfolded_gep_offset:
 ; CHECK:         .functype load_sext_v4i16_to_v4i32_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1715,13 +1715,13 @@ define <4 x i32> @load_sext_v4i16_to_v4i32_with_unfolded_gep_offset(<4 x i16>* %
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i32x4.load16x4_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <4 x i16>, <4 x i16>* %p, i32 1
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = getelementptr <4 x i16>, ptr %p, i32 1
+  %v = load <4 x i16>, ptr %s
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i16_to_v4i32_with_unfolded_gep_offset(<4 x i16>* %p) {
+define <4 x i32> @load_zext_v4i16_to_v4i32_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i16_to_v4i32_with_unfolded_gep_offset:
 ; CHECK:         .functype load_zext_v4i16_to_v4i32_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1730,13 +1730,13 @@ define <4 x i32> @load_zext_v4i16_to_v4i32_with_unfolded_gep_offset(<4 x i16>* %
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i32x4.load16x4_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <4 x i16>, <4 x i16>* %p, i32 1
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = getelementptr <4 x i16>, ptr %p, i32 1
+  %v = load <4 x i16>, ptr %s
   %v2 = zext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_sext_v4i8_to_v4i32_with_unfolded_gep_offset(<4 x i8>* %p) {
+define <4 x i32> @load_sext_v4i8_to_v4i32_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v4i8_to_v4i32_with_unfolded_gep_offset:
 ; CHECK:         .functype load_sext_v4i8_to_v4i32_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:    .local v128
@@ -1752,13 +1752,13 @@ define <4 x i32> @load_sext_v4i8_to_v4i32_with_unfolded_gep_offset(<4 x i8>* %p)
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32x4.shr_s
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <4 x i8>, <4 x i8>* %p, i32 1
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = getelementptr <4 x i8>, ptr %p, i32 1
+  %v = load <4 x i8>, ptr %s
   %v2 = sext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i32> @load_zext_v4i8_to_v4i32_with_unfolded_gep_offset(<4 x i8>* %p) {
+define <4 x i32> @load_zext_v4i8_to_v4i32_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v4i8_to_v4i32_with_unfolded_gep_offset:
 ; CHECK:         .functype load_zext_v4i8_to_v4i32_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1769,13 +1769,13 @@ define <4 x i32> @load_zext_v4i8_to_v4i32_with_unfolded_gep_offset(<4 x i8>* %p)
 ; CHECK-NEXT:    v128.load32_zero 0
 ; CHECK-NEXT:    i8x16.shuffle 16, 1, 2, 3, 17, 5, 6, 7, 18, 9, 10, 11, 19, 13, 14, 15
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <4 x i8>, <4 x i8>* %p, i32 1
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = getelementptr <4 x i8>, ptr %p, i32 1
+  %v = load <4 x i8>, ptr %s
   %v2 = zext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
 
-define <4 x i16> @load_ext_v4i32_with_unfolded_gep_offset(<4 x i16>* %p) {
+define <4 x i16> @load_ext_v4i32_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v4i32_with_unfolded_gep_offset:
 ; CHECK:         .functype load_ext_v4i32_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -1784,8 +1784,8 @@ define <4 x i16> @load_ext_v4i32_with_unfolded_gep_offset(<4 x i16>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <4 x i16>, <4 x i16>* %p, i32 1
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = getelementptr <4 x i16>, ptr %p, i32 1
+  %v = load <4 x i16>, ptr %s
   ret <4 x i16> %v
 }
 
@@ -1796,8 +1796,8 @@ define <4 x i32> @load_v4i32_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <4 x i32>*
-  %v = load <4 x i32>, <4 x i32>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <4 x i32>, ptr %s
   ret <4 x i32> %v
 }
 
@@ -1808,8 +1808,8 @@ define <4 x i32> @load_splat_v4i32_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load32_splat 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to i32*
-  %e = load i32, i32* %s
+  %s = inttoptr i32 32 to ptr
+  %e = load i32, ptr %s
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
@@ -1822,8 +1822,8 @@ define <4 x i32> @load_sext_v4i16_to_v4i32_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i32x4.load16x4_s 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <4 x i16>*
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <4 x i16>, ptr %s
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
@@ -1835,8 +1835,8 @@ define <4 x i32> @load_zext_v4i16_to_v4i32_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i32x4.load16x4_u 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <4 x i16>*
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <4 x i16>, ptr %s
   %v2 = zext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
@@ -1855,8 +1855,8 @@ define <4 x i32> @load_sext_v4i8_to_v4i32_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32x4.shr_s
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <4 x i8>*
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <4 x i8>, ptr %s
   %v2 = sext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
@@ -1870,8 +1870,8 @@ define <4 x i32> @load_zext_v4i8_to_v4i32_from_numeric_address() {
 ; CHECK-NEXT:    v128.load32_zero 32
 ; CHECK-NEXT:    i8x16.shuffle 16, 1, 2, 3, 17, 5, 6, 7, 18, 9, 10, 11, 19, 13, 14, 15
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <4 x i8>*
-  %v = load <4 x i8>, <4 x i8>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <4 x i8>, ptr %s
   %v2 = zext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
@@ -1883,8 +1883,8 @@ define <4 x i16> @load_ext_v4i32_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_zero 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <4 x i16>*
-  %v = load <4 x i16>, <4 x i16>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <4 x i16>, ptr %s
   ret <4 x i16> %v
 }
 
@@ -1896,7 +1896,7 @@ define <4 x i32> @load_v4i32_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load gv_v4i32
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i32>, <4 x i32>* @gv_v4i32
+  %v = load <4 x i32>, ptr @gv_v4i32
   ret <4 x i32> %v
 }
 
@@ -1908,7 +1908,7 @@ define <4 x i32> @load_splat_v4i32_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load32_splat gv_i32
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i32, i32* @gv_i32
+  %e = load i32, ptr @gv_i32
   %v1 = insertelement <4 x i32> undef, i32 %e, i32 0
   %v2 = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %v2
@@ -1922,7 +1922,7 @@ define <4 x i32> @load_sext_v4i16_to_v4i32_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i32x4.load16x4_s gv_v4i16
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* @gv_v4i16
+  %v = load <4 x i16>, ptr @gv_v4i16
   %v2 = sext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
@@ -1934,7 +1934,7 @@ define <4 x i32> @load_zext_v4i16_to_v4i32_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i32x4.load16x4_u gv_v4i16
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* @gv_v4i16
+  %v = load <4 x i16>, ptr @gv_v4i16
   %v2 = zext <4 x i16> %v to <4 x i32>
   ret <4 x i32> %v2
 }
@@ -1954,7 +1954,7 @@ define <4 x i32> @load_sext_v4i8_to_v4i32_from_global_address() {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32x4.shr_s
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i8>, <4 x i8>* @gv_v4i8
+  %v = load <4 x i8>, ptr @gv_v4i8
   %v2 = sext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
@@ -1968,7 +1968,7 @@ define <4 x i32> @load_zext_v4i8_to_v4i32_from_global_address() {
 ; CHECK-NEXT:    v128.load32_zero gv_v4i8
 ; CHECK-NEXT:    i8x16.shuffle 16, 1, 2, 3, 17, 5, 6, 7, 18, 9, 10, 11, 19, 13, 14, 15
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i8>, <4 x i8>* @gv_v4i8
+  %v = load <4 x i8>, ptr @gv_v4i8
   %v2 = zext <4 x i8> %v to <4 x i32>
   ret <4 x i32> %v2
 }
@@ -1980,11 +1980,11 @@ define <4 x i16> @load_ext_v4i32_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_zero gv_v4i16
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x i16>, <4 x i16>* @gv_v4i16
+  %v = load <4 x i16>, ptr @gv_v4i16
   ret <4 x i16> %v
 }
 
-define void @store_v4i32(<4 x i32> %v, <4 x i32>* %p) {
+define void @store_v4i32(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_v4i32:
 ; CHECK:         .functype store_v4i32 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -1992,11 +1992,11 @@ define void @store_v4i32(<4 x i32> %v, <4 x i32>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x i32> %v , <4 x i32>* %p
+  store <4 x i32> %v , ptr %p
   ret void
 }
 
-define void @store_narrowing_v4i32(<4 x i16> %v, <4 x i16>* %p) {
+define void @store_narrowing_v4i32(<4 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v4i32:
 ; CHECK:         .functype store_narrowing_v4i32 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2004,11 +2004,11 @@ define void @store_narrowing_v4i32(<4 x i16> %v, <4 x i16>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x i16> %v , <4 x i16>* %p
+  store <4 x i16> %v , ptr %p
   ret void
 }
 
-define void @store_v4i32_with_folded_offset(<4 x i32> %v, <4 x i32>* %p) {
+define void @store_v4i32_with_folded_offset(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_v4i32_with_folded_offset:
 ; CHECK:         .functype store_v4i32_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2016,14 +2016,14 @@ define void @store_v4i32_with_folded_offset(<4 x i32> %v, <4 x i32>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i32>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i32>*
-  store <4 x i32> %v , <4 x i32>* %s
+  %s = inttoptr i32 %r to ptr
+  store <4 x i32> %v , ptr %s
   ret void
 }
 
-define void @store_narrowing_v4i32_with_folded_offset(<4 x i16> %v, <4 x i16>* %p) {
+define void @store_narrowing_v4i32_with_folded_offset(<4 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v4i32_with_folded_offset:
 ; CHECK:         .functype store_narrowing_v4i32_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2031,14 +2031,14 @@ define void @store_narrowing_v4i32_with_folded_offset(<4 x i16> %v, <4 x i16>* %
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 16, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i16>*
-  store <4 x i16> %v , <4 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  store <4 x i16> %v , ptr %s
   ret void
 }
 
-define void @store_v4i32_with_folded_gep_offset(<4 x i32> %v, <4 x i32>* %p) {
+define void @store_v4i32_with_folded_gep_offset(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_v4i32_with_folded_gep_offset:
 ; CHECK:         .functype store_v4i32_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2046,12 +2046,12 @@ define void @store_v4i32_with_folded_gep_offset(<4 x i32> %v, <4 x i32>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 1
-  store <4 x i32> %v , <4 x i32>* %s
+  %s = getelementptr inbounds <4 x i32>, ptr %p, i32 1
+  store <4 x i32> %v , ptr %s
   ret void
 }
 
-define void @store_narrowing_v4i32_with_folded_gep_offset(<4 x i16> %v, <4 x i16>* %p) {
+define void @store_narrowing_v4i32_with_folded_gep_offset(<4 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v4i32_with_folded_gep_offset:
 ; CHECK:         .functype store_narrowing_v4i32_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2059,12 +2059,12 @@ define void @store_narrowing_v4i32_with_folded_gep_offset(<4 x i16> %v, <4 x i16
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 8, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 1
-  store <4 x i16> %v , <4 x i16>* %s
+  %s = getelementptr inbounds <4 x i16>, ptr %p, i32 1
+  store <4 x i16> %v , ptr %s
   ret void
 }
 
-define void @store_v4i32_with_unfolded_gep_negative_offset(<4 x i32> %v, <4 x i32>* %p) {
+define void @store_v4i32_with_unfolded_gep_negative_offset(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_v4i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_v4i32_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2074,12 +2074,12 @@ define void @store_v4i32_with_unfolded_gep_negative_offset(<4 x i32> %v, <4 x i3
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 -1
-  store <4 x i32> %v , <4 x i32>* %s
+  %s = getelementptr inbounds <4 x i32>, ptr %p, i32 -1
+  store <4 x i32> %v , ptr %s
   ret void
 }
 
-define void @store_narrowing_v4i32_with_unfolded_gep_negative_offset(<4 x i16> %v, <4 x i16>* %p) {
+define void @store_narrowing_v4i32_with_unfolded_gep_negative_offset(<4 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v4i32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_narrowing_v4i32_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2089,12 +2089,12 @@ define void @store_narrowing_v4i32_with_unfolded_gep_negative_offset(<4 x i16> %
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 -1
-  store <4 x i16> %v , <4 x i16>* %s
+  %s = getelementptr inbounds <4 x i16>, ptr %p, i32 -1
+  store <4 x i16> %v , ptr %s
   ret void
 }
 
-define void @store_v4i32_with_unfolded_offset(<4 x i32> %v, <4 x i32>* %p) {
+define void @store_v4i32_with_unfolded_offset(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_v4i32_with_unfolded_offset:
 ; CHECK:         .functype store_v4i32_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2104,14 +2104,14 @@ define void @store_v4i32_with_unfolded_offset(<4 x i32> %v, <4 x i32>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i32>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i32>*
-  store <4 x i32> %v , <4 x i32>* %s
+  %s = inttoptr i32 %r to ptr
+  store <4 x i32> %v , ptr %s
   ret void
 }
 
-define void @store_narrowing_v4i32_with_unfolded_offset(<4 x i16> %v, <4 x i16>* %p) {
+define void @store_narrowing_v4i32_with_unfolded_offset(<4 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v4i32_with_unfolded_offset:
 ; CHECK:         .functype store_narrowing_v4i32_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2121,14 +2121,14 @@ define void @store_narrowing_v4i32_with_unfolded_offset(<4 x i16> %v, <4 x i16>*
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x i16>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x i16>*
-  store <4 x i16> %v , <4 x i16>* %s
+  %s = inttoptr i32 %r to ptr
+  store <4 x i16> %v , ptr %s
   ret void
 }
 
-define void @store_v4i32_with_unfolded_gep_offset(<4 x i32> %v, <4 x i32>* %p) {
+define void @store_v4i32_with_unfolded_gep_offset(<4 x i32> %v, ptr %p) {
 ; CHECK-LABEL: store_v4i32_with_unfolded_gep_offset:
 ; CHECK:         .functype store_v4i32_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2138,12 +2138,12 @@ define void @store_v4i32_with_unfolded_gep_offset(<4 x i32> %v, <4 x i32>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <4 x i32>, <4 x i32>* %p, i32 1
-  store <4 x i32> %v , <4 x i32>* %s
+  %s = getelementptr <4 x i32>, ptr %p, i32 1
+  store <4 x i32> %v , ptr %s
   ret void
 }
 
-define void @store_narrowing_v4i32_with_unfolded_gep_offset(<4 x i16> %v, <4 x i16>* %p) {
+define void @store_narrowing_v4i32_with_unfolded_gep_offset(<4 x i16> %v, ptr %p) {
 ; CHECK-LABEL: store_narrowing_v4i32_with_unfolded_gep_offset:
 ; CHECK:         .functype store_narrowing_v4i32_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2153,8 +2153,8 @@ define void @store_narrowing_v4i32_with_unfolded_gep_offset(<4 x i16> %v, <4 x i
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <4 x i16>, <4 x i16>* %p, i32 1
-  store <4 x i16> %v , <4 x i16>* %s
+  %s = getelementptr <4 x i16>, ptr %p, i32 1
+  store <4 x i16> %v , ptr %s
   ret void
 }
 
@@ -2166,8 +2166,8 @@ define void @store_v4i32_to_numeric_address(<4 x i32> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <4 x i32>*
-  store <4 x i32> %v , <4 x i32>* %s
+  %s = inttoptr i32 32 to ptr
+  store <4 x i32> %v , ptr %s
   ret void
 }
 
@@ -2179,8 +2179,8 @@ define void @store_narrowing_v4i32_to_numeric_address(<4 x i16> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane 32, 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <4 x i16>*
-  store <4 x i16> %v , <4 x i16>* %s
+  %s = inttoptr i32 32 to ptr
+  store <4 x i16> %v , ptr %s
   ret void
 }
 
@@ -2192,7 +2192,7 @@ define void @store_v4i32_to_global_address(<4 x i32> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store gv_v4i32
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x i32> %v , <4 x i32>* @gv_v4i32
+  store <4 x i32> %v , ptr @gv_v4i32
   ret void
 }
 
@@ -2204,211 +2204,211 @@ define void @store_narrowing_v4i32_to_global_address(<4 x i16> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store64_lane gv_v4i16, 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x i16> %v , <4 x i16>* @gv_v4i16
+  store <4 x i16> %v , ptr @gv_v4i16
   ret void
 }
 
 ; ==============================================================================
 ; 2 x i64
 ; ==============================================================================
-define <2 x i64> @load_v2i64(<2 x i64>* %p) {
+define <2 x i64> @load_v2i64(ptr %p) {
 ; CHECK-LABEL: load_v2i64:
 ; CHECK:         .functype load_v2i64 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i64>, <2 x i64>* %p
+  %v = load <2 x i64>, ptr %p
   ret <2 x i64> %v
 }
 
-define <2 x i64> @load_splat_v2i64(i64* %p) {
+define <2 x i64> @load_splat_v2i64(ptr %p) {
 ; CHECK-LABEL: load_splat_v2i64:
 ; CHECK:         .functype load_splat_v2i64 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* %p
+  %e = load i64, ptr %p
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_sext_v2i64(<2 x i32>* %p) {
+define <2 x i64> @load_sext_v2i64(ptr %p) {
 ; CHECK-LABEL: load_sext_v2i64:
 ; CHECK:         .functype load_sext_v2i64 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i64x2.load32x2_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i32>, <2 x i32>* %p
+  %v = load <2 x i32>, ptr %p
   %v2 = sext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_zext_v2i64(<2 x i32>* %p) {
+define <2 x i64> @load_zext_v2i64(ptr %p) {
 ; CHECK-LABEL: load_zext_v2i64:
 ; CHECK:         .functype load_zext_v2i64 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i64x2.load32x2_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i32>, <2 x i32>* %p
+  %v = load <2 x i32>, ptr %p
   %v2 = zext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i32> @load_ext_v2i64(<2 x i32>* %p) {
+define <2 x i32> @load_ext_v2i64(ptr %p) {
 ; CHECK-LABEL: load_ext_v2i64:
 ; CHECK:         .functype load_ext_v2i64 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i32>, <2 x i32>* %p
+  %v = load <2 x i32>, ptr %p
   ret <2 x i32> %v
 }
 
-define <2 x i64> @load_v2i64_with_folded_offset(<2 x i64>* %p) {
+define <2 x i64> @load_v2i64_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_v2i64_with_folded_offset:
 ; CHECK:         .functype load_v2i64_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x i64>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x i64>*
-  %v = load <2 x i64>, <2 x i64>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <2 x i64>, ptr %s
   ret <2 x i64> %v
 }
 
-define <2 x i64> @load_splat_v2i64_with_folded_offset(i64* %p) {
+define <2 x i64> @load_splat_v2i64_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v2i64_with_folded_offset:
 ; CHECK:         .functype load_splat_v2i64_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_splat 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i64* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to i64*
-  %e = load i64, i64* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load i64, ptr %s
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_sext_v2i64_with_folded_offset(<2 x i32>* %p) {
+define <2 x i64> @load_sext_v2i64_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v2i64_with_folded_offset:
 ; CHECK:         .functype load_sext_v2i64_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i64x2.load32x2_s 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x i32>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x i32>*
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <2 x i32>, ptr %s
   %v2 = sext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_zext_v2i64_with_folded_offset(<2 x i32>* %p) {
+define <2 x i64> @load_zext_v2i64_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v2i64_with_folded_offset:
 ; CHECK:         .functype load_zext_v2i64_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i64x2.load32x2_u 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x i32>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x i32>*
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <2 x i32>, ptr %s
   %v2 = zext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i32> @load_ext_v2i64_with_folded_offset(<2 x i32>* %p) {
+define <2 x i32> @load_ext_v2i64_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v2i64_with_folded_offset:
 ; CHECK:         .functype load_ext_v2i64_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x i32>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x i32>*
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <2 x i32>, ptr %s
   ret <2 x i32> %v
 }
 
-define <2 x i64> @load_v2i64_with_folded_gep_offset(<2 x i64>* %p) {
+define <2 x i64> @load_v2i64_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v2i64_with_folded_gep_offset:
 ; CHECK:         .functype load_v2i64_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x i64>, <2 x i64>* %p, i32 1
-  %v = load <2 x i64>, <2 x i64>* %s
+  %s = getelementptr inbounds <2 x i64>, ptr %p, i32 1
+  %v = load <2 x i64>, ptr %s
   ret <2 x i64> %v
 }
 
-define <2 x i64> @load_splat_v2i64_with_folded_gep_offset(i64* %p) {
+define <2 x i64> @load_splat_v2i64_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v2i64_with_folded_gep_offset:
 ; CHECK:         .functype load_splat_v2i64_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_splat 8
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i64, i64* %p, i32 1
-  %e = load i64, i64* %s
+  %s = getelementptr inbounds i64, ptr %p, i32 1
+  %e = load i64, ptr %s
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_sext_v2i64_with_folded_gep_offset(<2 x i32>* %p) {
+define <2 x i64> @load_sext_v2i64_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v2i64_with_folded_gep_offset:
 ; CHECK:         .functype load_sext_v2i64_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i64x2.load32x2_s 8
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i32 1
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = getelementptr inbounds <2 x i32>, ptr %p, i32 1
+  %v = load <2 x i32>, ptr %s
   %v2 = sext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_zext_v2i64_with_folded_gep_offset(<2 x i32>* %p) {
+define <2 x i64> @load_zext_v2i64_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v2i64_with_folded_gep_offset:
 ; CHECK:         .functype load_zext_v2i64_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i64x2.load32x2_u 8
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i32 1
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = getelementptr inbounds <2 x i32>, ptr %p, i32 1
+  %v = load <2 x i32>, ptr %s
   %v2 = zext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i32> @load_ext_v2i64_with_folded_gep_offset(<2 x i32>* %p) {
+define <2 x i32> @load_ext_v2i64_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v2i64_with_folded_gep_offset:
 ; CHECK:         .functype load_ext_v2i64_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_zero 8
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i32 1
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = getelementptr inbounds <2 x i32>, ptr %p, i32 1
+  %v = load <2 x i32>, ptr %s
   ret <2 x i32> %v
 }
 
-define <2 x i64> @load_v2i64_with_unfolded_gep_negative_offset(<2 x i64>* %p) {
+define <2 x i64> @load_v2i64_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_v2i64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_v2i64_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2417,12 +2417,12 @@ define <2 x i64> @load_v2i64_with_unfolded_gep_negative_offset(<2 x i64>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x i64>, <2 x i64>* %p, i32 -1
-  %v = load <2 x i64>, <2 x i64>* %s
+  %s = getelementptr inbounds <2 x i64>, ptr %p, i32 -1
+  %v = load <2 x i64>, ptr %s
   ret <2 x i64> %v
 }
 
-define <2 x i64> @load_splat_v2i64_with_unfolded_gep_negative_offset(i64* %p) {
+define <2 x i64> @load_splat_v2i64_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v2i64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_splat_v2i64_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2431,14 +2431,14 @@ define <2 x i64> @load_splat_v2i64_with_unfolded_gep_negative_offset(i64* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds i64, i64* %p, i32 -1
-  %e = load i64, i64* %s
+  %s = getelementptr inbounds i64, ptr %p, i32 -1
+  %e = load i64, ptr %s
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_sext_v2i64_with_unfolded_gep_negative_offset(<2 x i32>* %p) {
+define <2 x i64> @load_sext_v2i64_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v2i64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_sext_v2i64_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2447,13 +2447,13 @@ define <2 x i64> @load_sext_v2i64_with_unfolded_gep_negative_offset(<2 x i32>* %
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i64x2.load32x2_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i32 -1
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = getelementptr inbounds <2 x i32>, ptr %p, i32 -1
+  %v = load <2 x i32>, ptr %s
   %v2 = sext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_zext_v2i64_with_unfolded_gep_negative_offset(<2 x i32>* %p) {
+define <2 x i64> @load_zext_v2i64_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v2i64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_zext_v2i64_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2462,13 +2462,13 @@ define <2 x i64> @load_zext_v2i64_with_unfolded_gep_negative_offset(<2 x i32>* %
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i64x2.load32x2_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i32 -1
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = getelementptr inbounds <2 x i32>, ptr %p, i32 -1
+  %v = load <2 x i32>, ptr %s
   %v2 = zext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i32> @load_ext_v2i64_with_unfolded_gep_negative_offset(<2 x i32>* %p) {
+define <2 x i32> @load_ext_v2i64_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v2i64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_ext_v2i64_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2477,12 +2477,12 @@ define <2 x i32> @load_ext_v2i64_with_unfolded_gep_negative_offset(<2 x i32>* %p
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i32 -1
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = getelementptr inbounds <2 x i32>, ptr %p, i32 -1
+  %v = load <2 x i32>, ptr %s
   ret <2 x i32> %v
 }
 
-define <2 x i64> @load_v2i64_with_unfolded_offset(<2 x i64>* %p) {
+define <2 x i64> @load_v2i64_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_v2i64_with_unfolded_offset:
 ; CHECK:         .functype load_v2i64_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2491,14 +2491,14 @@ define <2 x i64> @load_v2i64_with_unfolded_offset(<2 x i64>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x i64>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x i64>*
-  %v = load <2 x i64>, <2 x i64>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <2 x i64>, ptr %s
   ret <2 x i64> %v
 }
 
-define <2 x i64> @load_splat_v2i64_with_unfolded_offset(i64* %p) {
+define <2 x i64> @load_splat_v2i64_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v2i64_with_unfolded_offset:
 ; CHECK:         .functype load_splat_v2i64_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2507,16 +2507,16 @@ define <2 x i64> @load_splat_v2i64_with_unfolded_offset(i64* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint i64* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to i64*
-  %e = load i64, i64* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load i64, ptr %s
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_sext_v2i64_with_unfolded_offset(<2 x i32>* %p) {
+define <2 x i64> @load_sext_v2i64_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v2i64_with_unfolded_offset:
 ; CHECK:         .functype load_sext_v2i64_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2525,15 +2525,15 @@ define <2 x i64> @load_sext_v2i64_with_unfolded_offset(<2 x i32>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i64x2.load32x2_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x i32>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x i32>*
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <2 x i32>, ptr %s
   %v2 = sext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_zext_v2i64_with_unfolded_offset(<2 x i32>* %p) {
+define <2 x i64> @load_zext_v2i64_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v2i64_with_unfolded_offset:
 ; CHECK:         .functype load_zext_v2i64_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2542,15 +2542,15 @@ define <2 x i64> @load_zext_v2i64_with_unfolded_offset(<2 x i32>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i64x2.load32x2_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x i32>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x i32>*
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <2 x i32>, ptr %s
   %v2 = zext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i32> @load_ext_v2i64_with_unfolded_offset(<2 x i32>* %p) {
+define <2 x i32> @load_ext_v2i64_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v2i64_with_unfolded_offset:
 ; CHECK:         .functype load_ext_v2i64_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2559,14 +2559,14 @@ define <2 x i32> @load_ext_v2i64_with_unfolded_offset(<2 x i32>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x i32>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x i32>*
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <2 x i32>, ptr %s
   ret <2 x i32> %v
 }
 
-define <2 x i64> @load_v2i64_with_unfolded_gep_offset(<2 x i64>* %p) {
+define <2 x i64> @load_v2i64_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v2i64_with_unfolded_gep_offset:
 ; CHECK:         .functype load_v2i64_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2575,12 +2575,12 @@ define <2 x i64> @load_v2i64_with_unfolded_gep_offset(<2 x i64>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <2 x i64>, <2 x i64>* %p, i32 1
-  %v = load <2 x i64>, <2 x i64>* %s
+  %s = getelementptr <2 x i64>, ptr %p, i32 1
+  %v = load <2 x i64>, ptr %s
   ret <2 x i64> %v
 }
 
-define <2 x i64> @load_splat_v2i64_with_unfolded_gep_offset(i64* %p) {
+define <2 x i64> @load_splat_v2i64_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v2i64_with_unfolded_gep_offset:
 ; CHECK:         .functype load_splat_v2i64_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2589,14 +2589,14 @@ define <2 x i64> @load_splat_v2i64_with_unfolded_gep_offset(i64* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr i64, i64* %p, i32 1
-  %e = load i64, i64* %s
+  %s = getelementptr i64, ptr %p, i32 1
+  %e = load i64, ptr %s
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_sext_v2i64_with_unfolded_gep_offset(<2 x i32>* %p) {
+define <2 x i64> @load_sext_v2i64_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_sext_v2i64_with_unfolded_gep_offset:
 ; CHECK:         .functype load_sext_v2i64_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2605,13 +2605,13 @@ define <2 x i64> @load_sext_v2i64_with_unfolded_gep_offset(<2 x i32>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i64x2.load32x2_s 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <2 x i32>, <2 x i32>* %p, i32 1
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = getelementptr <2 x i32>, ptr %p, i32 1
+  %v = load <2 x i32>, ptr %s
   %v2 = sext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i64> @load_zext_v2i64_with_unfolded_gep_offset(<2 x i32>* %p) {
+define <2 x i64> @load_zext_v2i64_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_zext_v2i64_with_unfolded_gep_offset:
 ; CHECK:         .functype load_zext_v2i64_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2620,13 +2620,13 @@ define <2 x i64> @load_zext_v2i64_with_unfolded_gep_offset(<2 x i32>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    i64x2.load32x2_u 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <2 x i32>, <2 x i32>* %p, i32 1
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = getelementptr <2 x i32>, ptr %p, i32 1
+  %v = load <2 x i32>, ptr %s
   %v2 = zext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
 
-define <2 x i32> @load_ext_v2i64_with_unfolded_gep_offset(<2 x i32>* %p) {
+define <2 x i32> @load_ext_v2i64_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_ext_v2i64_with_unfolded_gep_offset:
 ; CHECK:         .functype load_ext_v2i64_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2635,8 +2635,8 @@ define <2 x i32> @load_ext_v2i64_with_unfolded_gep_offset(<2 x i32>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <2 x i32>, <2 x i32>* %p, i32 1
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = getelementptr <2 x i32>, ptr %p, i32 1
+  %v = load <2 x i32>, ptr %s
   ret <2 x i32> %v
 }
 
@@ -2647,8 +2647,8 @@ define <2 x i64> @load_v2i64_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <2 x i64>*
-  %v = load <2 x i64>, <2 x i64>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <2 x i64>, ptr %s
   ret <2 x i64> %v
 }
 
@@ -2659,8 +2659,8 @@ define <2 x i64> @load_splat_v2i64_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_splat 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to i64*
-  %e = load i64, i64* %s
+  %s = inttoptr i32 32 to ptr
+  %e = load i64, ptr %s
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
@@ -2673,8 +2673,8 @@ define <2 x i64> @load_sext_v2i64_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i64x2.load32x2_s 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <2 x i32>*
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <2 x i32>, ptr %s
   %v2 = sext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
@@ -2686,8 +2686,8 @@ define <2 x i64> @load_zext_v2i64_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i64x2.load32x2_u 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <2 x i32>*
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <2 x i32>, ptr %s
   %v2 = zext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
@@ -2699,8 +2699,8 @@ define <2 x i32> @load_ext_v2i64_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_zero 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <2 x i32>*
-  %v = load <2 x i32>, <2 x i32>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <2 x i32>, ptr %s
   ret <2 x i32> %v
 }
 
@@ -2712,7 +2712,7 @@ define <2 x i64> @load_v2i64_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load gv_v2i64
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i64>, <2 x i64>* @gv_v2i64
+  %v = load <2 x i64>, ptr @gv_v2i64
   ret <2 x i64> %v
 }
 
@@ -2724,7 +2724,7 @@ define <2 x i64> @load_splat_v2i64_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_splat gv_i64
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load i64, i64* @gv_i64
+  %e = load i64, ptr @gv_i64
   %v1 = insertelement <2 x i64> undef, i64 %e, i32 0
   %v2 = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %v2
@@ -2738,7 +2738,7 @@ define <2 x i64> @load_sext_v2i64_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i64x2.load32x2_s gv_v2i32
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i32>, <2 x i32>* @gv_v2i32
+  %v = load <2 x i32>, ptr @gv_v2i32
   %v2 = sext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
@@ -2750,7 +2750,7 @@ define <2 x i64> @load_zext_v2i64_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    i64x2.load32x2_u gv_v2i32
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i32>, <2 x i32>* @gv_v2i32
+  %v = load <2 x i32>, ptr @gv_v2i32
   %v2 = zext <2 x i32> %v to <2 x i64>
   ret <2 x i64> %v2
 }
@@ -2762,11 +2762,11 @@ define <2 x i32> @load_ext_v2i64_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_zero gv_v2i32
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x i32>, <2 x i32>* @gv_v2i32
+  %v = load <2 x i32>, ptr @gv_v2i32
   ret <2 x i32> %v
 }
 
-define void @store_v2i64(<2 x i64> %v, <2 x i64>* %p) {
+define void @store_v2i64(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_v2i64:
 ; CHECK:         .functype store_v2i64 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2774,11 +2774,11 @@ define void @store_v2i64(<2 x i64> %v, <2 x i64>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x i64> %v , <2 x i64>* %p
+  store <2 x i64> %v , ptr %p
   ret void
 }
 
-define void @store_v2i64_with_folded_offset(<2 x i64> %v, <2 x i64>* %p) {
+define void @store_v2i64_with_folded_offset(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_v2i64_with_folded_offset:
 ; CHECK:         .functype store_v2i64_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2786,14 +2786,14 @@ define void @store_v2i64_with_folded_offset(<2 x i64> %v, <2 x i64>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x i64>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x i64>*
-  store <2 x i64> %v , <2 x i64>* %s
+  %s = inttoptr i32 %r to ptr
+  store <2 x i64> %v , ptr %s
   ret void
 }
 
-define void @store_v2i64_with_folded_gep_offset(<2 x i64> %v, <2 x i64>* %p) {
+define void @store_v2i64_with_folded_gep_offset(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_v2i64_with_folded_gep_offset:
 ; CHECK:         .functype store_v2i64_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2801,12 +2801,12 @@ define void @store_v2i64_with_folded_gep_offset(<2 x i64> %v, <2 x i64>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x i64>, <2 x i64>* %p, i32 1
-  store <2 x i64> %v , <2 x i64>* %s
+  %s = getelementptr inbounds <2 x i64>, ptr %p, i32 1
+  store <2 x i64> %v , ptr %s
   ret void
 }
 
-define void @store_v2i64_with_unfolded_gep_negative_offset(<2 x i64> %v, <2 x i64>* %p) {
+define void @store_v2i64_with_unfolded_gep_negative_offset(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_v2i64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_v2i64_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2816,12 +2816,12 @@ define void @store_v2i64_with_unfolded_gep_negative_offset(<2 x i64> %v, <2 x i6
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x i64>, <2 x i64>* %p, i32 -1
-  store <2 x i64> %v , <2 x i64>* %s
+  %s = getelementptr inbounds <2 x i64>, ptr %p, i32 -1
+  store <2 x i64> %v , ptr %s
   ret void
 }
 
-define void @store_v2i64_with_unfolded_offset(<2 x i64> %v, <2 x i64>* %p) {
+define void @store_v2i64_with_unfolded_offset(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_v2i64_with_unfolded_offset:
 ; CHECK:         .functype store_v2i64_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2831,14 +2831,14 @@ define void @store_v2i64_with_unfolded_offset(<2 x i64> %v, <2 x i64>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x i64>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x i64>*
-  store <2 x i64> %v , <2 x i64>* %s
+  %s = inttoptr i32 %r to ptr
+  store <2 x i64> %v , ptr %s
   ret void
 }
 
-define void @store_v2i64_with_unfolded_gep_offset(<2 x i64> %v, <2 x i64>* %p) {
+define void @store_v2i64_with_unfolded_gep_offset(<2 x i64> %v, ptr %p) {
 ; CHECK-LABEL: store_v2i64_with_unfolded_gep_offset:
 ; CHECK:         .functype store_v2i64_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -2848,8 +2848,8 @@ define void @store_v2i64_with_unfolded_gep_offset(<2 x i64> %v, <2 x i64>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <2 x i64>, <2 x i64>* %p, i32 1
-  store <2 x i64> %v , <2 x i64>* %s
+  %s = getelementptr <2 x i64>, ptr %p, i32 1
+  store <2 x i64> %v , ptr %s
   ret void
 }
 
@@ -2861,8 +2861,8 @@ define void @store_v2i64_to_numeric_address(<2 x i64> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <2 x i64>*
-  store <2 x i64> %v , <2 x i64>* %s
+  %s = inttoptr i32 32 to ptr
+  store <2 x i64> %v , ptr %s
   ret void
 }
 
@@ -2874,94 +2874,94 @@ define void @store_v2i64_to_global_address(<2 x i64> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store gv_v2i64
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x i64> %v , <2 x i64>* @gv_v2i64
+  store <2 x i64> %v , ptr @gv_v2i64
   ret void
 }
 
 ; ==============================================================================
 ; 4 x float
 ; ==============================================================================
-define <4 x float> @load_v4f32(<4 x float>* %p) {
+define <4 x float> @load_v4f32(ptr %p) {
 ; CHECK-LABEL: load_v4f32:
 ; CHECK:         .functype load_v4f32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x float>, <4 x float>* %p
+  %v = load <4 x float>, ptr %p
   ret <4 x float> %v
 }
 
-define <4 x float> @load_splat_v4f32(float* %p) {
+define <4 x float> @load_splat_v4f32(ptr %p) {
 ; CHECK-LABEL: load_splat_v4f32:
 ; CHECK:         .functype load_splat_v4f32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load float, float* %p
+  %e = load float, ptr %p
   %v1 = insertelement <4 x float> undef, float %e, i32 0
   %v2 = shufflevector <4 x float> %v1, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %v2
 }
 
-define <4 x float> @load_v4f32_with_folded_offset(<4 x float>* %p) {
+define <4 x float> @load_v4f32_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_v4f32_with_folded_offset:
 ; CHECK:         .functype load_v4f32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x float>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x float>*
-  %v = load <4 x float>, <4 x float>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x float>, ptr %s
   ret <4 x float> %v
 }
 
-define <4 x float> @load_splat_v4f32_with_folded_offset(float* %p) {
+define <4 x float> @load_splat_v4f32_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v4f32_with_folded_offset:
 ; CHECK:         .functype load_splat_v4f32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_splat 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint float* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to float*
-  %e = load float, float* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load float, ptr %s
   %v1 = insertelement <4 x float> undef, float %e, i32 0
   %v2 = shufflevector <4 x float> %v1, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %v2
 }
 
-define <4 x float> @load_v4f32_with_folded_gep_offset(<4 x float>* %p) {
+define <4 x float> @load_v4f32_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v4f32_with_folded_gep_offset:
 ; CHECK:         .functype load_v4f32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x float>, <4 x float>* %p, i32 1
-  %v = load <4 x float>, <4 x float>* %s
+  %s = getelementptr inbounds <4 x float>, ptr %p, i32 1
+  %v = load <4 x float>, ptr %s
   ret <4 x float> %v
 }
 
-define <4 x float> @load_splat_v4f32_with_folded_gep_offset(float* %p) {
+define <4 x float> @load_splat_v4f32_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v4f32_with_folded_gep_offset:
 ; CHECK:         .functype load_splat_v4f32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load32_splat 4
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds float, float* %p, i32 1
-  %e = load float, float* %s
+  %s = getelementptr inbounds float, ptr %p, i32 1
+  %e = load float, ptr %s
   %v1 = insertelement <4 x float> undef, float %e, i32 0
   %v2 = shufflevector <4 x float> %v1, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %v2
 }
 
-define <4 x float> @load_v4f32_with_unfolded_gep_negative_offset(<4 x float>* %p) {
+define <4 x float> @load_v4f32_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_v4f32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_v4f32_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2970,12 +2970,12 @@ define <4 x float> @load_v4f32_with_unfolded_gep_negative_offset(<4 x float>* %p
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x float>, <4 x float>* %p, i32 -1
-  %v = load <4 x float>, <4 x float>* %s
+  %s = getelementptr inbounds <4 x float>, ptr %p, i32 -1
+  %v = load <4 x float>, ptr %s
   ret <4 x float> %v
 }
 
-define <4 x float> @load_splat_v4f32_with_unfolded_gep_negative_offset(float* %p) {
+define <4 x float> @load_splat_v4f32_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v4f32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_splat_v4f32_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -2984,14 +2984,14 @@ define <4 x float> @load_splat_v4f32_with_unfolded_gep_negative_offset(float* %p
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load32_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds float, float* %p, i32 -1
-  %e = load float, float* %s
+  %s = getelementptr inbounds float, ptr %p, i32 -1
+  %e = load float, ptr %s
   %v1 = insertelement <4 x float> undef, float %e, i32 0
   %v2 = shufflevector <4 x float> %v1, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %v2
 }
 
-define <4 x float> @load_v4f32_with_unfolded_offset(<4 x float>* %p) {
+define <4 x float> @load_v4f32_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_v4f32_with_unfolded_offset:
 ; CHECK:         .functype load_v4f32_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3000,14 +3000,14 @@ define <4 x float> @load_v4f32_with_unfolded_offset(<4 x float>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x float>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x float>*
-  %v = load <4 x float>, <4 x float>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <4 x float>, ptr %s
   ret <4 x float> %v
 }
 
-define <4 x float> @load_splat_v4f32_with_unfolded_offset(float* %p) {
+define <4 x float> @load_splat_v4f32_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v4f32_with_unfolded_offset:
 ; CHECK:         .functype load_splat_v4f32_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3016,16 +3016,16 @@ define <4 x float> @load_splat_v4f32_with_unfolded_offset(float* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load32_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint float* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to float*
-  %e = load float, float* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load float, ptr %s
   %v1 = insertelement <4 x float> undef, float %e, i32 0
   %v2 = shufflevector <4 x float> %v1, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %v2
 }
 
-define <4 x float> @load_v4f32_with_unfolded_gep_offset(<4 x float>* %p) {
+define <4 x float> @load_v4f32_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v4f32_with_unfolded_gep_offset:
 ; CHECK:         .functype load_v4f32_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3034,12 +3034,12 @@ define <4 x float> @load_v4f32_with_unfolded_gep_offset(<4 x float>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <4 x float>, <4 x float>* %p, i32 1
-  %v = load <4 x float>, <4 x float>* %s
+  %s = getelementptr <4 x float>, ptr %p, i32 1
+  %v = load <4 x float>, ptr %s
   ret <4 x float> %v
 }
 
-define <4 x float> @load_splat_v4f32_with_unfolded_gep_offset(float* %p) {
+define <4 x float> @load_splat_v4f32_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v4f32_with_unfolded_gep_offset:
 ; CHECK:         .functype load_splat_v4f32_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3048,8 +3048,8 @@ define <4 x float> @load_splat_v4f32_with_unfolded_gep_offset(float* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load32_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr float, float* %p, i32 1
-  %e = load float, float* %s
+  %s = getelementptr float, ptr %p, i32 1
+  %e = load float, ptr %s
   %v1 = insertelement <4 x float> undef, float %e, i32 0
   %v2 = shufflevector <4 x float> %v1, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %v2
@@ -3062,8 +3062,8 @@ define <4 x float> @load_v4f32_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <4 x float>*
-  %v = load <4 x float>, <4 x float>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <4 x float>, ptr %s
   ret <4 x float> %v
 }
 
@@ -3074,8 +3074,8 @@ define <4 x float> @load_splat_v4f32_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load32_splat 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to float*
-  %e = load float, float* %s
+  %s = inttoptr i32 32 to ptr
+  %e = load float, ptr %s
   %v1 = insertelement <4 x float> undef, float %e, i32 0
   %v2 = shufflevector <4 x float> %v1, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %v2
@@ -3089,7 +3089,7 @@ define <4 x float> @load_v4f32_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load gv_v4f32
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <4 x float>, <4 x float>* @gv_v4f32
+  %v = load <4 x float>, ptr @gv_v4f32
   ret <4 x float> %v
 }
 
@@ -3101,13 +3101,13 @@ define <4 x float> @load_splat_v4f32_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load32_splat gv_f32
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load float, float* @gv_f32
+  %e = load float, ptr @gv_f32
   %v1 = insertelement <4 x float> undef, float %e, i32 0
   %v2 = shufflevector <4 x float> %v1, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %v2
 }
 
-define void @store_v4f32(<4 x float> %v, <4 x float>* %p) {
+define void @store_v4f32(<4 x float> %v, ptr %p) {
 ; CHECK-LABEL: store_v4f32:
 ; CHECK:         .functype store_v4f32 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3115,11 +3115,11 @@ define void @store_v4f32(<4 x float> %v, <4 x float>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x float> %v , <4 x float>* %p
+  store <4 x float> %v , ptr %p
   ret void
 }
 
-define void @store_v4f32_with_folded_offset(<4 x float> %v, <4 x float>* %p) {
+define void @store_v4f32_with_folded_offset(<4 x float> %v, ptr %p) {
 ; CHECK-LABEL: store_v4f32_with_folded_offset:
 ; CHECK:         .functype store_v4f32_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3127,14 +3127,14 @@ define void @store_v4f32_with_folded_offset(<4 x float> %v, <4 x float>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x float>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x float>*
-  store <4 x float> %v , <4 x float>* %s
+  %s = inttoptr i32 %r to ptr
+  store <4 x float> %v , ptr %s
   ret void
 }
 
-define void @store_v4f32_with_folded_gep_offset(<4 x float> %v, <4 x float>* %p) {
+define void @store_v4f32_with_folded_gep_offset(<4 x float> %v, ptr %p) {
 ; CHECK-LABEL: store_v4f32_with_folded_gep_offset:
 ; CHECK:         .functype store_v4f32_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3142,12 +3142,12 @@ define void @store_v4f32_with_folded_gep_offset(<4 x float> %v, <4 x float>* %p)
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x float>, <4 x float>* %p, i32 1
-  store <4 x float> %v , <4 x float>* %s
+  %s = getelementptr inbounds <4 x float>, ptr %p, i32 1
+  store <4 x float> %v , ptr %s
   ret void
 }
 
-define void @store_v4f32_with_unfolded_gep_negative_offset(<4 x float> %v, <4 x float>* %p) {
+define void @store_v4f32_with_unfolded_gep_negative_offset(<4 x float> %v, ptr %p) {
 ; CHECK-LABEL: store_v4f32_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_v4f32_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3157,12 +3157,12 @@ define void @store_v4f32_with_unfolded_gep_negative_offset(<4 x float> %v, <4 x
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <4 x float>, <4 x float>* %p, i32 -1
-  store <4 x float> %v , <4 x float>* %s
+  %s = getelementptr inbounds <4 x float>, ptr %p, i32 -1
+  store <4 x float> %v , ptr %s
   ret void
 }
 
-define void @store_v4f32_with_unfolded_offset(<4 x float> %v, <4 x float>* %p) {
+define void @store_v4f32_with_unfolded_offset(<4 x float> %v, ptr %p) {
 ; CHECK-LABEL: store_v4f32_with_unfolded_offset:
 ; CHECK:         .functype store_v4f32_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3172,14 +3172,14 @@ define void @store_v4f32_with_unfolded_offset(<4 x float> %v, <4 x float>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <4 x float>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <4 x float>*
-  store <4 x float> %v , <4 x float>* %s
+  %s = inttoptr i32 %r to ptr
+  store <4 x float> %v , ptr %s
   ret void
 }
 
-define void @store_v4f32_with_unfolded_gep_offset(<4 x float> %v, <4 x float>* %p) {
+define void @store_v4f32_with_unfolded_gep_offset(<4 x float> %v, ptr %p) {
 ; CHECK-LABEL: store_v4f32_with_unfolded_gep_offset:
 ; CHECK:         .functype store_v4f32_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3189,8 +3189,8 @@ define void @store_v4f32_with_unfolded_gep_offset(<4 x float> %v, <4 x float>* %
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <4 x float>, <4 x float>* %p, i32 1
-  store <4 x float> %v , <4 x float>* %s
+  %s = getelementptr <4 x float>, ptr %p, i32 1
+  store <4 x float> %v , ptr %s
   ret void
 }
 
@@ -3202,8 +3202,8 @@ define void @store_v4f32_to_numeric_address(<4 x float> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <4 x float>*
-  store <4 x float> %v , <4 x float>* %s
+  %s = inttoptr i32 32 to ptr
+  store <4 x float> %v , ptr %s
   ret void
 }
 
@@ -3215,38 +3215,38 @@ define void @store_v4f32_to_global_address(<4 x float> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store gv_v4f32
 ; CHECK-NEXT:    # fallthrough-return
-  store <4 x float> %v , <4 x float>* @gv_v4f32
+  store <4 x float> %v , ptr @gv_v4f32
   ret void
 }
 
 ; ==============================================================================
 ; 2 x double
 ; ==============================================================================
-define <2 x double> @load_v2f64(<2 x double>* %p) {
+define <2 x double> @load_v2f64(ptr %p) {
 ; CHECK-LABEL: load_v2f64:
 ; CHECK:         .functype load_v2f64 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x double>, <2 x double>* %p
+  %v = load <2 x double>, ptr %p
   ret <2 x double> %v
 }
 
-define <2 x double> @load_splat_v2f64(double* %p) {
+define <2 x double> @load_splat_v2f64(ptr %p) {
 ; CHECK-LABEL: load_splat_v2f64:
 ; CHECK:         .functype load_splat_v2f64 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load double, double* %p
+  %e = load double, ptr %p
   %v1 = insertelement <2 x double> undef, double %e, i32 0
   %v2 = shufflevector <2 x double> %v1, <2 x double> undef, <2 x i32> zeroinitializer
   ret <2 x double> %v2
 }
 
-define <2 x double> @load_promote_v2f64(<2 x float>* %p) {
+define <2 x double> @load_promote_v2f64(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64:
 ; CHECK:         .functype load_promote_v2f64 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3254,42 +3254,42 @@ define <2 x double> @load_promote_v2f64(<2 x float>* %p) {
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load <2 x float>, <2 x float>* %p
+  %e = load <2 x float>, ptr %p
   %v = fpext <2 x float> %e to <2 x double>
   ret <2 x double> %v
 }
 
-define <2 x double> @load_v2f64_with_folded_offset(<2 x double>* %p) {
+define <2 x double> @load_v2f64_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_v2f64_with_folded_offset:
 ; CHECK:         .functype load_v2f64_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x double>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x double>*
-  %v = load <2 x double>, <2 x double>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <2 x double>, ptr %s
   ret <2 x double> %v
 }
 
-define <2 x double> @load_splat_v2f64_with_folded_offset(double* %p) {
+define <2 x double> @load_splat_v2f64_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v2f64_with_folded_offset:
 ; CHECK:         .functype load_splat_v2f64_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_splat 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint double* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to double*
-  %e = load double, double* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load double, ptr %s
   %v1 = insertelement <2 x double> undef, double %e, i32 0
   %v2 = shufflevector <2 x double> %v1, <2 x double> undef, <2 x i32> zeroinitializer
   ret <2 x double> %v2
 }
 
-define <2 x double> @load_promote_v2f64_with_folded_offset(<2 x float>* %p) {
+define <2 x double> @load_promote_v2f64_with_folded_offset(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64_with_folded_offset:
 ; CHECK:         .functype load_promote_v2f64_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3299,41 +3299,41 @@ define <2 x double> @load_promote_v2f64_with_folded_offset(<2 x float>* %p) {
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x float>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x float>*
-  %e = load <2 x float>, <2 x float>* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load <2 x float>, ptr %s
   %v = fpext <2 x float> %e to <2 x double>
   ret <2 x double> %v
 }
 
-define <2 x double> @load_v2f64_with_folded_gep_offset(<2 x double>* %p) {
+define <2 x double> @load_v2f64_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v2f64_with_folded_gep_offset:
 ; CHECK:         .functype load_v2f64_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 1
-  %v = load <2 x double>, <2 x double>* %s
+  %s = getelementptr inbounds <2 x double>, ptr %p, i32 1
+  %v = load <2 x double>, ptr %s
   ret <2 x double> %v
 }
 
-define <2 x double> @load_splat_v2f64_with_folded_gep_offset(double* %p) {
+define <2 x double> @load_splat_v2f64_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v2f64_with_folded_gep_offset:
 ; CHECK:         .functype load_splat_v2f64_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.load64_splat 8
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds double, double* %p, i32 1
-  %e = load double, double* %s
+  %s = getelementptr inbounds double, ptr %p, i32 1
+  %e = load double, ptr %s
   %v1 = insertelement <2 x double> undef, double %e, i32 0
   %v2 = shufflevector <2 x double> %v1, <2 x double> undef, <2 x i32> zeroinitializer
   ret <2 x double> %v2
 }
 
-define <2 x double> @load_promote_v2f64_with_folded_gep_offset(<2 x float>* %p) {
+define <2 x double> @load_promote_v2f64_with_folded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64_with_folded_gep_offset:
 ; CHECK:         .functype load_promote_v2f64_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3343,13 +3343,13 @@ define <2 x double> @load_promote_v2f64_with_folded_gep_offset(<2 x float>* %p)
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x float>, <2 x float>* %p, i32 1
-  %e = load <2 x float>, <2 x float>* %s
+  %s = getelementptr inbounds <2 x float>, ptr %p, i32 1
+  %e = load <2 x float>, ptr %s
   %v = fpext <2 x float> %e to <2 x double>
   ret <2 x double> %v
 }
 
-define <2 x double> @load_v2f64_with_unfolded_gep_negative_offset(<2 x double>* %p) {
+define <2 x double> @load_v2f64_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_v2f64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_v2f64_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3358,12 +3358,12 @@ define <2 x double> @load_v2f64_with_unfolded_gep_negative_offset(<2 x double>*
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 -1
-  %v = load <2 x double>, <2 x double>* %s
+  %s = getelementptr inbounds <2 x double>, ptr %p, i32 -1
+  %v = load <2 x double>, ptr %s
   ret <2 x double> %v
 }
 
-define <2 x double> @load_splat_v2f64_with_unfolded_gep_negative_offset(double* %p) {
+define <2 x double> @load_splat_v2f64_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v2f64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_splat_v2f64_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3372,14 +3372,14 @@ define <2 x double> @load_splat_v2f64_with_unfolded_gep_negative_offset(double*
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds double, double* %p, i32 -1
-  %e = load double, double* %s
+  %s = getelementptr inbounds double, ptr %p, i32 -1
+  %e = load double, ptr %s
   %v1 = insertelement <2 x double> undef, double %e, i32 0
   %v2 = shufflevector <2 x double> %v1, <2 x double> undef, <2 x i32> zeroinitializer
   ret <2 x double> %v2
 }
 
-define <2 x double> @load_promote_v2f64_with_unfolded_gep_negative_offset(<2 x float>* %p) {
+define <2 x double> @load_promote_v2f64_with_unfolded_gep_negative_offset(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype load_promote_v2f64_with_unfolded_gep_negative_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3389,13 +3389,13 @@ define <2 x double> @load_promote_v2f64_with_unfolded_gep_negative_offset(<2 x f
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x float>, <2 x float>* %p, i32 -1
-  %e = load <2 x float>, <2 x float>* %s
+  %s = getelementptr inbounds <2 x float>, ptr %p, i32 -1
+  %e = load <2 x float>, ptr %s
   %v = fpext <2 x float> %e to <2 x double>
   ret <2 x double> %v
 }
 
-define <2 x double> @load_v2f64_with_unfolded_offset(<2 x double>* %p) {
+define <2 x double> @load_v2f64_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_v2f64_with_unfolded_offset:
 ; CHECK:         .functype load_v2f64_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3404,14 +3404,14 @@ define <2 x double> @load_v2f64_with_unfolded_offset(<2 x double>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x double>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x double>*
-  %v = load <2 x double>, <2 x double>* %s
+  %s = inttoptr i32 %r to ptr
+  %v = load <2 x double>, ptr %s
   ret <2 x double> %v
 }
 
-define <2 x double> @load_splat_v2f64_with_unfolded_offset(double* %p) {
+define <2 x double> @load_splat_v2f64_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v2f64_with_unfolded_offset:
 ; CHECK:         .functype load_splat_v2f64_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3420,16 +3420,16 @@ define <2 x double> @load_splat_v2f64_with_unfolded_offset(double* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint double* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to double*
-  %e = load double, double* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load double, ptr %s
   %v1 = insertelement <2 x double> undef, double %e, i32 0
   %v2 = shufflevector <2 x double> %v1, <2 x double> undef, <2 x i32> zeroinitializer
   ret <2 x double> %v2
 }
 
-define <2 x double> @load_promote_v2f64_with_unfolded_offset(<2 x float>* %p) {
+define <2 x double> @load_promote_v2f64_with_unfolded_offset(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64_with_unfolded_offset:
 ; CHECK:         .functype load_promote_v2f64_with_unfolded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3439,15 +3439,15 @@ define <2 x double> @load_promote_v2f64_with_unfolded_offset(<2 x float>* %p) {
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x float>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x float>*
-  %e = load <2 x float>, <2 x float>* %s
+  %s = inttoptr i32 %r to ptr
+  %e = load <2 x float>, ptr %s
   %v = fpext <2 x float> %e to <2 x double>
   ret <2 x double> %v
 }
 
-define <2 x double> @load_v2f64_with_unfolded_gep_offset(<2 x double>* %p) {
+define <2 x double> @load_v2f64_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_v2f64_with_unfolded_gep_offset:
 ; CHECK:         .functype load_v2f64_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3456,12 +3456,12 @@ define <2 x double> @load_v2f64_with_unfolded_gep_offset(<2 x double>* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <2 x double>, <2 x double>* %p, i32 1
-  %v = load <2 x double>, <2 x double>* %s
+  %s = getelementptr <2 x double>, ptr %p, i32 1
+  %v = load <2 x double>, ptr %s
   ret <2 x double> %v
 }
 
-define <2 x double> @load_splat_v2f64_with_unfolded_gep_offset(double* %p) {
+define <2 x double> @load_splat_v2f64_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_splat_v2f64_with_unfolded_gep_offset:
 ; CHECK:         .functype load_splat_v2f64_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3470,14 +3470,14 @@ define <2 x double> @load_splat_v2f64_with_unfolded_gep_offset(double* %p) {
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    v128.load64_splat 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr double, double* %p, i32 1
-  %e = load double, double* %s
+  %s = getelementptr double, ptr %p, i32 1
+  %e = load double, ptr %s
   %v1 = insertelement <2 x double> undef, double %e, i32 0
   %v2 = shufflevector <2 x double> %v1, <2 x double> undef, <2 x i32> zeroinitializer
   ret <2 x double> %v2
 }
 
-define <2 x double> @load_promote_v2f64_with_unfolded_gep_offset(<2 x float>* %p) {
+define <2 x double> @load_promote_v2f64_with_unfolded_gep_offset(ptr %p) {
 ; CHECK-LABEL: load_promote_v2f64_with_unfolded_gep_offset:
 ; CHECK:         .functype load_promote_v2f64_with_unfolded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
@@ -3487,8 +3487,8 @@ define <2 x double> @load_promote_v2f64_with_unfolded_gep_offset(<2 x float>* %p
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <2 x float>, <2 x float>* %p, i32 1
-  %e = load <2 x float>, <2 x float>* %s
+  %s = getelementptr <2 x float>, ptr %p, i32 1
+  %e = load <2 x float>, ptr %s
   %v = fpext <2 x float> %e to <2 x double>
   ret <2 x double> %v
 }
@@ -3500,8 +3500,8 @@ define <2 x double> @load_v2f64_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <2 x double>*
-  %v = load <2 x double>, <2 x double>* %s
+  %s = inttoptr i32 32 to ptr
+  %v = load <2 x double>, ptr %s
   ret <2 x double> %v
 }
 
@@ -3512,8 +3512,8 @@ define <2 x double> @load_splat_v2f64_from_numeric_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_splat 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to double*
-  %e = load double, double* %s
+  %s = inttoptr i32 32 to ptr
+  %e = load double, ptr %s
   %v1 = insertelement <2 x double> undef, double %e, i32 0
   %v2 = shufflevector <2 x double> %v1, <2 x double> undef, <2 x i32> zeroinitializer
   ret <2 x double> %v2
@@ -3527,8 +3527,8 @@ define <2 x double> @load_promote_v2f64_from_numeric_address() {
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <2 x float>*
-  %e = load <2 x float>, <2 x float>* %s
+  %s = inttoptr i32 32 to ptr
+  %e = load <2 x float>, ptr %s
   %v = fpext <2 x float> %e to <2 x double>
   ret <2 x double> %v
 }
@@ -3541,7 +3541,7 @@ define <2 x double> @load_v2f64_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load gv_v2f64
 ; CHECK-NEXT:    # fallthrough-return
-  %v = load <2 x double>, <2 x double>* @gv_v2f64
+  %v = load <2 x double>, ptr @gv_v2f64
   ret <2 x double> %v
 }
 
@@ -3553,7 +3553,7 @@ define <2 x double> @load_splat_v2f64_from_global_address() {
 ; CHECK-NEXT:    i32.const 0
 ; CHECK-NEXT:    v128.load64_splat gv_f64
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load double, double* @gv_f64
+  %e = load double, ptr @gv_f64
   %v1 = insertelement <2 x double> undef, double %e, i32 0
   %v2 = shufflevector <2 x double> %v1, <2 x double> undef, <2 x i32> zeroinitializer
   ret <2 x double> %v2
@@ -3568,12 +3568,12 @@ define <2 x double> @load_promote_v2f64_from_global_address() {
 ; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    f64x2.promote_low_f32x4
 ; CHECK-NEXT:    # fallthrough-return
-  %e = load <2 x float>, <2 x float>* @gv_v2f32
+  %e = load <2 x float>, ptr @gv_v2f32
   %v = fpext <2 x float> %e to <2 x double>
   ret <2 x double> %v
 }
 
-define void @store_v2f64(<2 x double> %v, <2 x double>* %p) {
+define void @store_v2f64(<2 x double> %v, ptr %p) {
 ; CHECK-LABEL: store_v2f64:
 ; CHECK:         .functype store_v2f64 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3581,11 +3581,11 @@ define void @store_v2f64(<2 x double> %v, <2 x double>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x double> %v , <2 x double>* %p
+  store <2 x double> %v , ptr %p
   ret void
 }
 
-define void @store_v2f64_with_folded_offset(<2 x double> %v, <2 x double>* %p) {
+define void @store_v2f64_with_folded_offset(<2 x double> %v, ptr %p) {
 ; CHECK-LABEL: store_v2f64_with_folded_offset:
 ; CHECK:         .functype store_v2f64_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3593,14 +3593,14 @@ define void @store_v2f64_with_folded_offset(<2 x double> %v, <2 x double>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x double>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nuw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x double>*
-  store <2 x double> %v , <2 x double>* %s
+  %s = inttoptr i32 %r to ptr
+  store <2 x double> %v , ptr %s
   ret void
 }
 
-define void @store_v2f64_with_folded_gep_offset(<2 x double> %v, <2 x double>* %p) {
+define void @store_v2f64_with_folded_gep_offset(<2 x double> %v, ptr %p) {
 ; CHECK-LABEL: store_v2f64_with_folded_gep_offset:
 ; CHECK:         .functype store_v2f64_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3608,12 +3608,12 @@ define void @store_v2f64_with_folded_gep_offset(<2 x double> %v, <2 x double>* %
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 16
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 1
-  store <2 x double> %v , <2 x double>* %s
+  %s = getelementptr inbounds <2 x double>, ptr %p, i32 1
+  store <2 x double> %v , ptr %s
   ret void
 }
 
-define void @store_v2f64_with_unfolded_gep_negative_offset(<2 x double> %v, <2 x double>* %p) {
+define void @store_v2f64_with_unfolded_gep_negative_offset(<2 x double> %v, ptr %p) {
 ; CHECK-LABEL: store_v2f64_with_unfolded_gep_negative_offset:
 ; CHECK:         .functype store_v2f64_with_unfolded_gep_negative_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3623,12 +3623,12 @@ define void @store_v2f64_with_unfolded_gep_negative_offset(<2 x double> %v, <2 x
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 -1
-  store <2 x double> %v , <2 x double>* %s
+  %s = getelementptr inbounds <2 x double>, ptr %p, i32 -1
+  store <2 x double> %v , ptr %s
   ret void
 }
 
-define void @store_v2f64_with_unfolded_offset(<2 x double> %v, <2 x double>* %p) {
+define void @store_v2f64_with_unfolded_offset(<2 x double> %v, ptr %p) {
 ; CHECK-LABEL: store_v2f64_with_unfolded_offset:
 ; CHECK:         .functype store_v2f64_with_unfolded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3638,14 +3638,14 @@ define void @store_v2f64_with_unfolded_offset(<2 x double> %v, <2 x double>* %p)
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %q = ptrtoint <2 x double>* %p to i32
+  %q = ptrtoint ptr %p to i32
   %r = add nsw i32 %q, 16
-  %s = inttoptr i32 %r to <2 x double>*
-  store <2 x double> %v , <2 x double>* %s
+  %s = inttoptr i32 %r to ptr
+  store <2 x double> %v , ptr %s
   ret void
 }
 
-define void @store_v2f64_with_unfolded_gep_offset(<2 x double> %v, <2 x double>* %p) {
+define void @store_v2f64_with_unfolded_gep_offset(<2 x double> %v, ptr %p) {
 ; CHECK-LABEL: store_v2f64_with_unfolded_gep_offset:
 ; CHECK:         .functype store_v2f64_with_unfolded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
@@ -3655,8 +3655,8 @@ define void @store_v2f64_with_unfolded_gep_offset(<2 x double> %v, <2 x double>*
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 0
 ; CHECK-NEXT:    # fallthrough-return
-  %s = getelementptr <2 x double>, <2 x double>* %p, i32 1
-  store <2 x double> %v , <2 x double>* %s
+  %s = getelementptr <2 x double>, ptr %p, i32 1
+  store <2 x double> %v , ptr %s
   ret void
 }
 
@@ -3668,8 +3668,8 @@ define void @store_v2f64_to_numeric_address(<2 x double> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store 32
 ; CHECK-NEXT:    # fallthrough-return
-  %s = inttoptr i32 32 to <2 x double>*
-  store <2 x double> %v , <2 x double>* %s
+  %s = inttoptr i32 32 to ptr
+  store <2 x double> %v , ptr %s
   ret void
 }
 
@@ -3681,6 +3681,6 @@ define void @store_v2f64_to_global_address(<2 x double> %v) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    v128.store gv_v2f64
 ; CHECK-NEXT:    # fallthrough-return
-  store <2 x double> %v , <2 x double>* @gv_v2f64
+  store <2 x double> %v , ptr @gv_v2f64
   ret void
 }

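The hunks above are representative of the whole conversion: each typed
pointer type (<2 x i64>*, float*, double*, and so on) collapses to the
single opaque ptr type, while load, store, and getelementptr keep their
explicit value and element types, so the folded offsets that the CHECK
lines verify cannot change. A minimal before/after sketch of the rewrite
(the function name @example is illustrative, not taken from the test):

  ; Before: the pointee type is spelled on the pointer.
  define <2 x i64> @example(<2 x i64>* %p) {
    %s = getelementptr inbounds <2 x i64>, <2 x i64>* %p, i32 1
    %v = load <2 x i64>, <2 x i64>* %s
    ret <2 x i64> %v
  }

  ; After: the pointer is opaque; the element type on the getelementptr
  ; and the result type on the load still drive offset folding and
  ; instruction selection, hence the unchanged "v128.load 16" output.
  define <2 x i64> @example(ptr %p) {
    %s = getelementptr inbounds <2 x i64>, ptr %p, i32 1
    %v = load <2 x i64>, ptr %s
    ret <2 x i64> %v
  }
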
diff --git a/llvm/test/CodeGen/WebAssembly/simd-simplify-demanded-vector-elts.ll b/llvm/test/CodeGen/WebAssembly/simd-simplify-demanded-vector-elts.ll
index d03d9c0ceeb9..fd6e466d9b75 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-simplify-demanded-vector-elts.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-simplify-demanded-vector-elts.ll
@@ -24,6 +24,6 @@ define void @test(i8 %0) {
   %10 = sub nsw <4 x i32> <i32 655360000, i32 655360000, i32 655360000, i32 655360000>, %9
   %11 = ashr exact <4 x i32> %10, <i32 16, i32 16, i32 16, i32 16>
   %12 = trunc <4 x i32> %11 to <4 x i16>
-  store <4 x i16> %12, <4 x i16>* undef, align 4
+  store <4 x i16> %12, ptr undef, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/WebAssembly/stack-alignment.ll b/llvm/test/CodeGen/WebAssembly/stack-alignment.ll
index 229843ed4190..2ed05e48448a 100644
--- a/llvm/test/CodeGen/WebAssembly/stack-alignment.ll
+++ b/llvm/test/CodeGen/WebAssembly/stack-alignment.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck -DPTR=32 %s
 ; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck -DPTR=64 %s
 
-declare void @somefunc(i32*)
+declare void @somefunc(ptr)
 
 ; CHECK-LABEL: underalign:
 ; CHECK:      global.get $push[[L1:.+]]=, __stack_pointer{{$}}
@@ -19,7 +19,7 @@ declare void @somefunc(i32*)
 define void @underalign() {
 entry:
   %underaligned = alloca i32, align 8
-  call void @somefunc(i32* %underaligned)
+  call void @somefunc(ptr %underaligned)
   ret void
 }
 
@@ -40,7 +40,7 @@ entry:
 define void @overalign() {
 entry:
   %overaligned = alloca i32, align 32
-  call void @somefunc(i32* %overaligned)
+  call void @somefunc(ptr %overaligned)
   ret void
 }
 
@@ -64,8 +64,8 @@ define void @over_and_normal_align() {
 entry:
   %over = alloca i32, align 32
   %normal = alloca i32
-  call void @somefunc(i32* %over)
-  call void @somefunc(i32* %normal)
+  call void @somefunc(ptr %over)
+  call void @somefunc(ptr %normal)
   ret void
 }
 
@@ -83,7 +83,7 @@ entry:
 define void @dynamic_overalign(i32 %num) {
 entry:
   %dynamic = alloca i32, i32 %num, align 32
-  call void @somefunc(i32* %dynamic)
+  call void @somefunc(ptr %dynamic)
   ret void
 }
 
@@ -108,8 +108,8 @@ define void @overalign_and_dynamic(i32 %num) {
 entry:
   %over = alloca i32, align 32
   %dynamic = alloca i32, i32 %num
-  call void @somefunc(i32* %over)
-  call void @somefunc(i32* %dynamic)
+  call void @somefunc(ptr %over)
+  call void @somefunc(ptr %dynamic)
   ret void
 }
 
@@ -140,8 +140,8 @@ entry:
   %over = alloca i32, align 32
   %dynamic = alloca i32, i32 %num
   %static = alloca i32
-  call void @somefunc(i32* %over)
-  call void @somefunc(i32* %dynamic)
-  call void @somefunc(i32* %static)
+  call void @somefunc(ptr %over)
+  call void @somefunc(ptr %dynamic)
+  call void @somefunc(ptr %static)
   ret void
 }

diff --git a/llvm/test/CodeGen/WebAssembly/stack-protector.ll b/llvm/test/CodeGen/WebAssembly/stack-protector.ll
index 06d3aedba3b6..3a97849b5920 100644
--- a/llvm/test/CodeGen/WebAssembly/stack-protector.ll
+++ b/llvm/test/CodeGen/WebAssembly/stack-protector.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -verify-machineinstrs -mtriple=wasm32-unknown-unknown < %s | FileCheck -check-prefix=WASM32 %s
 
-@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00"		; <[11 x i8]*> [#uses=1]
+@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00"		; <ptr> [#uses=1]
 
 ; WASM32-LABEL: test:
 ; WASM32:      i32.load        28
@@ -8,17 +8,15 @@
 ; WASM32:      call __stack_chk_fail
 ; WASM32-NEXT: unreachable
 
-define void @test(i8* %a) nounwind ssp {
+define void @test(ptr %a) nounwind ssp {
 entry:
-	%a_addr = alloca i8*		; <i8**> [#uses=2]
-	%buf = alloca [8 x i8]		; <[8 x i8]*> [#uses=2]
+	%a_addr = alloca ptr		; <ptr> [#uses=2]
+	%buf = alloca [8 x i8]		; <ptr> [#uses=2]
   %"alloca point" = bitcast i32 0 to i32		; <i32> [#uses=0]
-	store i8* %a, i8** %a_addr
-	%buf1 = bitcast [8 x i8]* %buf to i8*		; <i8*> [#uses=1]
-	%0 = load i8*, i8** %a_addr, align 4		; <i8*> [#uses=1]
-	%1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind		; <i8*> [#uses=0]
-  %buf2 = bitcast [8 x i8]* %buf to i8*		; <i8*> [#uses=1]
-	%2 = call i32 (i8*, ...) @printf(i8* getelementptr ([11 x i8], [11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind		; <i32> [#uses=0]
+	store ptr %a, ptr %a_addr
+	%0 = load ptr, ptr %a_addr, align 4		; <ptr> [#uses=1]
+	%1 = call ptr @strcpy(ptr %buf, ptr %0) nounwind		; <ptr> [#uses=0]
+	%2 = call i32 (ptr, ...) @printf(ptr @"\01LC", ptr %buf) nounwind		; <i32> [#uses=0]
 	br label %return
 
 return:		; preds = %entry
@@ -29,23 +27,21 @@ return:		; preds = %entry
 ; WASM32:      call __stack_chk_fail
 ; WASM32-NEXT: unreachable
 
-define i32 @test_return_i32(i8* %a) nounwind ssp {
+define i32 @test_return_i32(ptr %a) nounwind ssp {
 entry:
-  %a_addr = alloca i8*    ; <i8**> [#uses=2]
-  %buf = alloca [8 x i8]    ; <[8 x i8]*> [#uses=2]
+  %a_addr = alloca ptr    ; <ptr> [#uses=2]
+  %buf = alloca [8 x i8]    ; <ptr> [#uses=2]
   %"alloca point" = bitcast i32 0 to i32    ; <i32> [#uses=0]
-  store i8* %a, i8** %a_addr
-  %buf1 = bitcast [8 x i8]* %buf to i8*    ; <i8*> [#uses=1]
-  %0 = load i8*, i8** %a_addr, align 4    ; <i8*> [#uses=1]
-  %1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind    ; <i8*> [#uses=0]
-  %buf2 = bitcast [8 x i8]* %buf to i8*    ; <i8*> [#uses=1]
-  %2 = call i32 (i8*, ...) @printf(i8* getelementptr ([11 x i8], [11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind    ; <i32> [#uses=0]
+  store ptr %a, ptr %a_addr
+  %0 = load ptr, ptr %a_addr, align 4    ; <ptr> [#uses=1]
+  %1 = call ptr @strcpy(ptr %buf, ptr %0) nounwind    ; <ptr> [#uses=0]
+  %2 = call i32 (ptr, ...) @printf(ptr @"\01LC", ptr %buf) nounwind    ; <i32> [#uses=0]
   br label %return
 
 return:    ; preds = %entry
   ret i32 0
 }
 
-declare i8* @strcpy(i8*, i8*) nounwind
+declare ptr @strcpy(ptr, ptr) nounwind
 
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(ptr, ...) nounwind

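stack-protector.ll shows the other half of the conversion: casts between
pointer types are meaningless once every pointer has the same type, so
they are deleted rather than rewritten. The %buf1 and %buf2 bitcasts of
the [8 x i8] alloca disappear, %buf is passed to @strcpy and @printf
directly, and the getelementptr constant expression over @"\01LC" folds
to the bare global. Condensed from the first hunk above:

  ; Before: a cast turned the alloca's [8 x i8]* into the i8* that
  ; @strcpy expects.
  %buf = alloca [8 x i8]
  %buf1 = bitcast [8 x i8]* %buf to i8*
  %1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind

  ; After: an alloca already yields a ptr, so the call takes %buf as-is.
  %buf = alloca [8 x i8]
  %1 = call ptr @strcpy(ptr %buf, ptr %0) nounwind
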
diff --git a/llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll b/llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll
index 7ab2b3a8404b..54eade26a48b 100644
--- a/llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll
+++ b/llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll
@@ -5,40 +5,40 @@
 
 ; CHECK-LABEL: trunc_i8_i32:
 ; CHECK: i32.atomic.store8 0($0), $1{{$}}
-define void @trunc_i8_i32(i8 *%p, i32 %v) {
+define void @trunc_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  store atomic i8 %t, i8* %p seq_cst, align 1
+  store atomic i8 %t, ptr %p seq_cst, align 1
   ret void
 }
 
 ; CHECK-LABEL: trunc_i16_i32:
 ; CHECK: i32.atomic.store16 0($0), $1{{$}}
-define void @trunc_i16_i32(i16 *%p, i32 %v) {
+define void @trunc_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  store atomic i16 %t, i16* %p seq_cst, align 2
+  store atomic i16 %t, ptr %p seq_cst, align 2
   ret void
 }
 
 ; CHECK-LABEL: trunc_i8_i64:
 ; CHECK: i64.atomic.store8 0($0), $1{{$}}
-define void @trunc_i8_i64(i8 *%p, i64 %v) {
+define void @trunc_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  store atomic i8 %t, i8* %p seq_cst, align 1
+  store atomic i8 %t, ptr %p seq_cst, align 1
   ret void
 }
 
 ; CHECK-LABEL: trunc_i16_i64:
 ; CHECK: i64.atomic.store16 0($0), $1{{$}}
-define void @trunc_i16_i64(i16 *%p, i64 %v) {
+define void @trunc_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  store atomic i16 %t, i16* %p seq_cst, align 2
+  store atomic i16 %t, ptr %p seq_cst, align 2
   ret void
 }
 
 ; CHECK-LABEL: trunc_i32_i64:
 ; CHECK: i64.atomic.store32 0($0), $1{{$}}
-define void @trunc_i32_i64(i32 *%p, i64 %v) {
+define void @trunc_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  store atomic i32 %t, i32* %p seq_cst, align 4
+  store atomic i32 %t, ptr %p seq_cst, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/WebAssembly/store-trunc.ll b/llvm/test/CodeGen/WebAssembly/store-trunc.ll
index cce92d71b516..7d58a9281e24 100644
--- a/llvm/test/CodeGen/WebAssembly/store-trunc.ll
+++ b/llvm/test/CodeGen/WebAssembly/store-trunc.ll
@@ -5,40 +5,40 @@
 
 ; CHECK-LABEL: trunc_i8_i32:
 ; CHECK: i32.store8 0($0), $1{{$}}
-define void @trunc_i8_i32(i8 *%p, i32 %v) {
+define void @trunc_i8_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i8
-  store i8 %t, i8* %p
+  store i8 %t, ptr %p
   ret void
 }
 
 ; CHECK-LABEL: trunc_i16_i32:
 ; CHECK: i32.store16 0($0), $1{{$}}
-define void @trunc_i16_i32(i16 *%p, i32 %v) {
+define void @trunc_i16_i32(ptr %p, i32 %v) {
   %t = trunc i32 %v to i16
-  store i16 %t, i16* %p
+  store i16 %t, ptr %p
   ret void
 }
 
 ; CHECK-LABEL: trunc_i8_i64:
 ; CHECK: i64.store8 0($0), $1{{$}}
-define void @trunc_i8_i64(i8 *%p, i64 %v) {
+define void @trunc_i8_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i8
-  store i8 %t, i8* %p
+  store i8 %t, ptr %p
   ret void
 }
 
 ; CHECK-LABEL: trunc_i16_i64:
 ; CHECK: i64.store16 0($0), $1{{$}}
-define void @trunc_i16_i64(i16 *%p, i64 %v) {
+define void @trunc_i16_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i16
-  store i16 %t, i16* %p
+  store i16 %t, ptr %p
   ret void
 }
 
 ; CHECK-LABEL: trunc_i32_i64:
 ; CHECK: i64.store32 0($0), $1{{$}}
-define void @trunc_i32_i64(i32 *%p, i64 %v) {
+define void @trunc_i32_i64(ptr %p, i64 %v) {
   %t = trunc i64 %v to i32
-  store i32 %t, i32* %p
+  store i32 %t, ptr %p
   ret void
 }

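In both truncating-store files the pointee type was already redundant:
the WebAssembly store is selected from the stored value alone (its i8,
i16, or i32 type gives the access width, and the trunc's source type the
register class), never from the pointer, which is why every CHECK line
survives the conversion untouched. Condensed from trunc_i8_i32 above:

  %t = trunc i32 %v to i8
  store i8 %t, ptr %p  ; i8 stored type + i32 trunc source = i32.store8
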
diff --git a/llvm/test/CodeGen/WebAssembly/store.ll b/llvm/test/CodeGen/WebAssembly/store.ll
index 9c4c62a7209d..d6c1e2fc23c2 100644
--- a/llvm/test/CodeGen/WebAssembly/store.ll
+++ b/llvm/test/CodeGen/WebAssembly/store.ll
@@ -14,8 +14,8 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i32.store 0($pop[[L0]]), $pop[[L1]]{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti32(i32 *%p, i32 %v) {
-  store i32 %v, i32* %p
+define void @sti32(ptr %p, i32 %v) {
+  store i32 %v, ptr %p
   ret void
 }
 
@@ -26,8 +26,8 @@ define void @sti32(i32 *%p, i32 %v) {
 ; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: i64.store 0($pop[[L0]]), $pop[[L1]]{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @sti64(i64 *%p, i64 %v) {
-  store i64 %v, i64* %p
+define void @sti64(ptr %p, i64 %v) {
+  store i64 %v, ptr %p
   ret void
 }
 
@@ -38,8 +38,8 @@ define void @sti64(i64 *%p, i64 %v) {
 ; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f32.store 0($pop[[L0]]), $pop[[L1]]{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @stf32(float *%p, float %v) {
-  store float %v, float* %p
+define void @stf32(ptr %p, float %v) {
+  store float %v, ptr %p
   ret void
 }
 
@@ -50,7 +50,7 @@ define void @stf32(float *%p, float %v) {
 ; CHECK-NEXT: local.get $push[[L1:[0-9]+]]=, 1{{$}}
 ; CHECK-NEXT: f64.store 0($pop[[L0]]), $pop[[L1]]{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @stf64(double *%p, double %v) {
-  store double %v, double* %p
+define void @stf64(ptr %p, double %v) {
+  store double %v, ptr %p
   ret void
 }

diff --git a/llvm/test/CodeGen/WebAssembly/swiftcc.ll b/llvm/test/CodeGen/WebAssembly/swiftcc.ll
index f3e684841ece..e0f67b02c05c 100644
--- a/llvm/test/CodeGen/WebAssembly/swiftcc.ll
+++ b/llvm/test/CodeGen/WebAssembly/swiftcc.ll
@@ -9,36 +9,32 @@ target triple = "wasm32-unknown-unknown"
 define swiftcc void @foo(i32, i32) {
   ret void
 }
-@data = global i8* bitcast (void (i32, i32)* @foo to i8*)
+@data = global ptr @foo
 
 ; CHECK-LABEL: bar:
 ; CHECK-NEXT: .functype       bar (i32, i32) -> ()
 define swiftcc void @bar() {
-  %1 = load i8*, i8** @data
+  %1 = load ptr, ptr @data
 ; REG: call    foo, $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}
   call swiftcc void @foo(i32 1, i32 2)
 
-  %2 = bitcast i8* %1 to void (i32, i32)*
 ; REG: call_indirect   $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}
 ; CHECK: call_indirect   (i32, i32, i32, i32) -> ()
-  call swiftcc void %2(i32 1, i32 2)
+  call swiftcc void %1(i32 1, i32 2)
 
-  %3 = bitcast i8* %1 to void (i32, i32, i32)*
 ; REG: call_indirect   $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}
 ; CHECK: call_indirect   (i32, i32, i32, i32) -> ()
-  call swiftcc void %3(i32 1, i32 2, i32 swiftself 3)
+  call swiftcc void %1(i32 1, i32 2, i32 swiftself 3)
 
-  %err = alloca swifterror i32*, align 4
+  %err = alloca swifterror ptr, align 4
 
-  %4 = bitcast i8* %1 to void (i32, i32, i32**)*
 ; REG: call_indirect   $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}
 ; CHECK: call_indirect   (i32, i32, i32, i32) -> ()
-  call swiftcc void %4(i32 1, i32 2, i32** swifterror %err)
+  call swiftcc void %1(i32 1, i32 2, ptr swifterror %err)
 
-  %5 = bitcast i8* %1 to void (i32, i32, i32, i32**)*
 ; REG: call_indirect   $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}, $pop{{[0-9]+}}
 ; CHECK: call_indirect   (i32, i32, i32, i32) -> ()
-  call swiftcc void %5(i32 1, i32 2, i32 swiftself 3, i32** swifterror %err)
+  call swiftcc void %1(i32 1, i32 2, i32 swiftself 3, ptr swifterror %err)
 
   ret void
 }

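swiftcc.ll likewise drops its per-signature function-pointer casts: the
single loaded ptr %1 becomes the callee of four indirect calls with
different argument lists, since the signature of an indirect call comes
from the call instruction itself. The call_indirect type annotations in
the CHECK lines are unchanged. Condensed:

  %1 = load ptr, ptr @data
  call swiftcc void %1(i32 1, i32 2)
  call swiftcc void %1(i32 1, i32 2, i32 swiftself 3)
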
diff --git a/llvm/test/CodeGen/WebAssembly/switch-in-loop.ll b/llvm/test/CodeGen/WebAssembly/switch-in-loop.ll
index e3bb0e0b97f3..08e6820c49d4 100644
--- a/llvm/test/CodeGen/WebAssembly/switch-in-loop.ll
+++ b/llvm/test/CodeGen/WebAssembly/switch-in-loop.ll
@@ -6,8 +6,8 @@
 
 target triple = "wasm32"
 
-declare void @a(i32*)
-declare void @b(i32*)
+declare void @a(ptr)
+declare void @b(ptr)
 
 ; CHECK-LABEL: switch_in_loop:
 ; CHECK-NEXT: .functype switch_in_loop (i32, i32) -> (i32)
@@ -36,37 +36,36 @@ declare void @b(i32*)
 ; CHECK:    end_block
 ; CHECK:    global.set __stack_pointer
 ; CHECK:    end_function
-define i32 @switch_in_loop(i32* %ops, i32 %len) {
+define i32 @switch_in_loop(ptr %ops, i32 %len) {
 entry:
   %res = alloca i32
-  %0 = bitcast i32* %res to i8*
-  store i32 0, i32* %res
+  store i32 0, ptr %res
   %cmp6 = icmp sgt i32 %len, 0
   br i1 %cmp6, label %for.body, label %for.cond.cleanup
 
 for.cond.cleanup.loopexit:                        ; preds = %sw.epilog
-  %.pre = load i32, i32* %res
+  %.pre = load i32, ptr %res
   br label %for.cond.cleanup
 
 for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
-  %1 = phi i32 [ %.pre, %for.cond.cleanup.loopexit ], [ 0, %entry ]
-  ret i32 %1
+  %0 = phi i32 [ %.pre, %for.cond.cleanup.loopexit ], [ 0, %entry ]
+  ret i32 %0
 
 for.body:                                         ; preds = %entry, %sw.epilog
   %i.07 = phi i32 [ %inc, %sw.epilog ], [ 0, %entry ]
-  %arrayidx = getelementptr inbounds i32, i32* %ops, i32 %i.07
-  %2 = load i32, i32* %arrayidx
-  switch i32 %2, label %sw.epilog [
+  %arrayidx = getelementptr inbounds i32, ptr %ops, i32 %i.07
+  %1 = load i32, ptr %arrayidx
+  switch i32 %1, label %sw.epilog [
     i32 0, label %sw.bb
     i32 1, label %sw.bb1
   ]
 
 sw.bb:                                            ; preds = %for.body
-  call void @a(i32* nonnull %res)
+  call void @a(ptr nonnull %res)
   br label %sw.epilog
 
 sw.bb1:                                           ; preds = %for.body
-  call void @b(i32* nonnull %res)
+  call void @b(ptr nonnull %res)
   br label %sw.epilog
 
 sw.epilog:                                        ; preds = %for.body, %sw.bb1, %sw.bb

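The renumbering in this hunk (%1 and %2 becoming %0 and %1) falls out mechanically: unnamed temporaries are numbered by position, and deleting the now-dead bitcast of %res shifts every later unnamed value down by one. A hypothetical sketch:

    define i32 @renumbered(ptr %p) {
    entry:
      ; Previously the first unnamed id went to a no-op bitcast of %p,
      ; making this load %1; with the bitcast gone it is %0.
      %0 = load i32, ptr %p
      ret i32 %0
    }
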
diff --git a/llvm/test/CodeGen/WebAssembly/tailcall.ll b/llvm/test/CodeGen/WebAssembly/tailcall.ll
index 3d96c666ddc5..07cdea1ec9b0 100644
--- a/llvm/test/CodeGen/WebAssembly/tailcall.ll
+++ b/llvm/test/CodeGen/WebAssembly/tailcall.ll
@@ -6,7 +6,7 @@
 
 target triple = "wasm32-unknown-unknown"
 
-%fn = type <{i32 (%fn, i32, i32)*}>
+%fn = type <{ptr}>
 declare i1 @foo(i1)
 declare i1 @bar(i1)
 
@@ -87,7 +87,7 @@ define i32 @indirect_tail(%fn %f, i32 %x, i32 %y) {
 ; CHECK: call_indirect $push[[L:[0-9]+]]=, $0, $pop{{[0-9]+}}{{$}}
 ; CHECK-NEXT: return $pop[[L]]{{$}}
 define i1 @choice_notail(i1 %x) {
-  %p = select i1 %x, i1 (i1)* @foo, i1 (i1)* @bar
+  %p = select i1 %x, ptr @foo, ptr @bar
   %v = notail call i1 %p(i1 %x)
   ret i1 %v
 }
@@ -95,7 +95,7 @@ define i1 @choice_notail(i1 %x) {
 ; CHECK-LABEL: choice_musttail:
 ; CHECK: return_call_indirect , $0, $pop{{[0-9]+}}{{$}}
 define i1 @choice_musttail(i1 %x) {
-  %p = select i1 %x, i1 (i1)* @foo, i1 (i1)* @bar
+  %p = select i1 %x, ptr @foo, ptr @bar
   %v = musttail call i1 %p(i1 %x)
   ret i1 %v
 }
@@ -105,7 +105,7 @@ define i1 @choice_musttail(i1 %x) {
 ; FAST: call_indirect $push[[L:[0-9]+]]=, $0, $pop{{[0-9]+}}{{$}}
 ; FAST: return $pop[[L]]{{$}}
 define i1 @choice_tail(i1 %x) {
-  %p = select i1 %x, i1 (i1)* @foo, i1 (i1)* @bar
+  %p = select i1 %x, ptr @foo, ptr @bar
   %v = tail call i1 %p(i1 %x)
   ret i1 %v
 }
@@ -165,9 +165,9 @@ define float @mismatched_indirect_f32(%fn %f, i32 %x, i32 %y) {
 ; CHECK-LABEL: mismatched_byval:
 ; CHECK: i32.store
 ; CHECK: return_call quux, $pop{{[0-9]+}}{{$}}
-declare i32 @quux(i32* byval(i32))
-define i32 @mismatched_byval(i32* %x) {
-  %v = tail call i32 @quux(i32* byval(i32) %x)
+declare i32 @quux(ptr byval(i32))
+define i32 @mismatched_byval(ptr %x) {
+  %v = tail call i32 @quux(ptr byval(i32) %x)
   ret i32 %v
 }
 
@@ -212,18 +212,18 @@ define i1 @mismatched_return_trunc() {
 
 ; CHECK-LABEL: stack_arg:
 ; CHECK: call
-define i32 @stack_arg(i32* %x) {
+define i32 @stack_arg(ptr %x) {
   %a = alloca i32
-  %v = tail call i32 @stack_arg(i32* %a)
+  %v = tail call i32 @stack_arg(ptr %a)
   ret i32 %v
 }
 
 ; CHECK-LABEL: stack_arg_gep:
 ; CHECK: call
-define i32 @stack_arg_gep(i32* %x) {
+define i32 @stack_arg_gep(ptr %x) {
   %a = alloca { i32, i32 }
-  %p = getelementptr { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
-  %v = tail call i32 @stack_arg_gep(i32* %p)
+  %p = getelementptr { i32, i32 }, ptr %a, i32 0, i32 1
+  %v = tail call i32 @stack_arg_gep(ptr %p)
   ret i32 %v
 }
 
@@ -235,7 +235,7 @@ define i32 @stack_arg_gep(i32* %x) {
 ; SLOW: return_call stack_arg_cast, ${{[0-9]+}}
 define i32 @stack_arg_cast(i32 %x) {
   %a = alloca [64 x i32]
-  %i = ptrtoint [64 x i32]* %a to i32
+  %i = ptrtoint ptr %a to i32
   %v = tail call i32 @stack_arg_cast(i32 %i)
   ret i32 %v
 }
@@ -251,8 +251,8 @@ define i32 @stack_arg_cast(i32 %x) {
 ; YAML-NEXT:      - F64
 ; YAML-NEXT:    ReturnTypes:
 ; YAML-NEXT:      - I32
-define i32 @unique_caller(i32 (i32, float, i64, double)** %p) {
-  %f = load i32 (i32, float, i64, double)*, i32 (i32, float, i64, double)** %p
+define i32 @unique_caller(ptr %p) {
+  %f = load ptr, ptr %p
   %v = tail call i32 %f(i32 0, float 0., i64 0, double 0.)
   ret i32 %v
 }

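The select-based cases also read more simply now: both arms of the select are plain ptr constants, so no per-signature function pointer type is spelled anywhere. A standalone sketch with hypothetical declarations:

    declare i1 @left(i1)
    declare i1 @right(i1)

    define i1 @pick(i1 %x) {
    entry:
      %f = select i1 %x, ptr @left, ptr @right
      ; tail/musttail/notail markers combine with this exactly as in
      ; the test above; the call instruction carries the i1 (i1) type.
      %v = tail call i1 %f(i1 %x)
      ret i1 %v
    }
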
diff --git a/llvm/test/CodeGen/WebAssembly/target-features.ll b/llvm/test/CodeGen/WebAssembly/target-features.ll
index 4debf66fe0f7..953a4ac9b41d 100644
--- a/llvm/test/CodeGen/WebAssembly/target-features.ll
+++ b/llvm/test/CodeGen/WebAssembly/target-features.ll
@@ -8,17 +8,17 @@
 
 target triple = "wasm32-unknown-unknown"
 
-define void @fn_atomics(i32* %p1, float %f2) #0 {
-  %a = atomicrmw min i32* undef, i32 42 seq_cst
+define void @fn_atomics(ptr %p1, float %f2) #0 {
+  %a = atomicrmw min ptr undef, i32 42 seq_cst
   %v = fptoui float %f2 to i32
-  store i32 %v, i32* %p1
+  store i32 %v, ptr %p1
   ret void
 }
 
-define void @fn_nontrapping_fptoint(i32* %p1, float %f2) #1 {
-  %a = atomicrmw min i32* undef, i32 42 seq_cst
+define void @fn_nontrapping_fptoint(ptr %p1, float %f2) #1 {
+  %a = atomicrmw min ptr undef, i32 42 seq_cst
   %v = fptoui float %f2 to i32
-  store i32 %v, i32* %p1
+  store i32 %v, ptr %p1
   ret void
 }
 

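Note where the access width of the atomicrmw lines now lives: it is carried entirely by the i32 value operand, since the pointer operand no longer has a pointee type. An illustrative sketch (function name is hypothetical):

    define i32 @rmw_min(ptr %p) {
    entry:
      ; The i32 operand alone determines the width of the atomic access.
      %old = atomicrmw min ptr %p, i32 42 seq_cst
      ret i32 %old
    }
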
diff --git a/llvm/test/CodeGen/WebAssembly/tls-general-dynamic.ll b/llvm/test/CodeGen/WebAssembly/tls-general-dynamic.ll
index c21102cd2aa9..46ab62dfaaa2 100644
--- a/llvm/test/CodeGen/WebAssembly/tls-general-dynamic.ll
+++ b/llvm/test/CodeGen/WebAssembly/tls-general-dynamic.ll
@@ -14,7 +14,7 @@ define i32 @address_of_tls() {
 
   ; NO-TLS-NEXT: i32.const tls
   ; NO-TLS-NEXT: return
-  ret i32 ptrtoint(i32* @tls to i32)
+  ret i32 ptrtoint(ptr @tls to i32)
 }
 
 ; CHECK-LABEL: address_of_tls_external:
@@ -25,12 +25,12 @@ define i32 @address_of_tls_external() {
 
   ; NO-TLS-NEXT: i32.const tls_external
   ; NO-TLS-NEXT: return
-  ret i32 ptrtoint(i32* @tls_external to i32)
+  ret i32 ptrtoint(ptr @tls_external to i32)
 }
 
 ; CHECK-LABEL: ptr_to_tls:
 ; CHECK-NEXT: .functype ptr_to_tls () -> (i32)
-define i32* @ptr_to_tls() {
+define ptr @ptr_to_tls() {
   ; TLS-DAG: global.get __tls_base
   ; TLS-DAG: i32.const tls@TLSREL
   ; TLS-NEXT: i32.add
@@ -38,18 +38,18 @@ define i32* @ptr_to_tls() {
 
   ; NO-TLS-NEXT: i32.const tls
   ; NO-TLS-NEXT: return
-  ret i32* @tls
+  ret ptr @tls
 }
 
 ; CHECK-LABEL: ptr_to_tls_external:
 ; CHECK-NEXT: .functype ptr_to_tls_external () -> (i32)
-define i32* @ptr_to_tls_external() {
+define ptr @ptr_to_tls_external() {
   ; TLS-DAG: global.get tls_external@GOT@TLS
   ; TLS-NEXT: return
 
   ; NO-TLS-NEXT: i32.const tls_external
   ; NO-TLS-NEXT: return
-  ret i32* @tls_external
+  ret ptr @tls_external
 }
 
 ; CHECK-LABEL: tls_load:
@@ -64,7 +64,7 @@ define i32 @tls_load() {
   ; NO-TLS-NEXT: i32.const 0
   ; NO-TLS-NEXT: i32.load tls
   ; NO-TLS-NEXT: return
-  %tmp = load i32, i32* @tls, align 4
+  %tmp = load i32, ptr @tls, align 4
   ret i32 %tmp
 }
 
@@ -78,7 +78,7 @@ define i32 @tls_load_external() {
   ; NO-TLS-NEXT: i32.const 0
   ; NO-TLS-NEXT: i32.load tls_external
   ; NO-TLS-NEXT: return
-  %tmp = load i32, i32* @tls_external, align 4
+  %tmp = load i32, ptr @tls_external, align 4
   ret i32 %tmp
 }
 
@@ -94,7 +94,7 @@ define void @tls_store(i32 %x) {
   ; NO-TLS-NEXT: i32.const 0
   ; NO-TLS-NEXT: i32.store tls
   ; NO-TLS-NEXT: return
-  store i32 %x, i32* @tls, align 4
+  store i32 %x, ptr @tls, align 4
   ret void
 }
 
@@ -108,7 +108,7 @@ define void @tls_store_external(i32 %x) {
   ; NO-TLS-NEXT: i32.const 0
   ; NO-TLS-NEXT: i32.store tls_external
   ; NO-TLS-NEXT: return
-  store i32 %x, i32* @tls_external, align 4
+  store i32 %x, ptr @tls_external, align 4
   ret void
 }
 
@@ -132,21 +132,21 @@ define i32 @tls_align() {
 
 ; CHECK-LABEL: tls_base:
 ; CHECK-NEXT: .functype tls_base () -> (i32)
-define i8* @tls_base() {
+define ptr @tls_base() {
 ; CHECK-NEXT: global.get __tls_base
 ; CHECK-NEXT: return
-  %1 = call i8* @llvm.wasm.tls.base()
-  ret i8* %1
+  %1 = call ptr @llvm.wasm.tls.base()
+  ret ptr %1
 }
 
 ; CHECK-LABEL: tls_base_write:
 ; CHECK-NEXT: .functype tls_base_write (i32) -> ()
-define void @tls_base_write(i8** %output) {
+define void @tls_base_write(ptr %output) {
 ; CHECK-NEXT: global.get __tls_base
 ; CHECK-NEXT: i32.store 0
 ; CHECK-NEXT: return
-  %1 = call i8* @llvm.wasm.tls.base()
-  store i8* %1, i8** %output
+  %1 = call ptr @llvm.wasm.tls.base()
+  store ptr %1, ptr %output
   ret void
 }
 
@@ -162,4 +162,4 @@ define void @tls_base_write(i8** %output) {
 
 declare i32 @llvm.wasm.tls.size.i32()
 declare i32 @llvm.wasm.tls.align.i32()
-declare i8* @llvm.wasm.tls.base()
+declare ptr @llvm.wasm.tls.base()

diff --git a/llvm/test/CodeGen/WebAssembly/tls-local-exec.ll b/llvm/test/CodeGen/WebAssembly/tls-local-exec.ll
index 45d4c7512a8d..3aa044c34789 100644
--- a/llvm/test/CodeGen/WebAssembly/tls-local-exec.ll
+++ b/llvm/test/CodeGen/WebAssembly/tls-local-exec.ll
@@ -20,7 +20,7 @@ define i32 @address_of_tls() {
 
   ; NO-TLS-NEXT: i32.const tls
   ; NO-TLS-NEXT: return
-  ret i32 ptrtoint(i32* @tls to i32)
+  ret i32 ptrtoint(ptr @tls to i32)
 }
 
 ; CHECK-LABEL: address_of_tls_external:
@@ -33,12 +33,12 @@ define i32 @address_of_tls_external() {
 
   ; NO-TLS-NEXT: i32.const tls_external
   ; NO-TLS-NEXT: return
-  ret i32 ptrtoint(i32* @tls_external to i32)
+  ret i32 ptrtoint(ptr @tls_external to i32)
 }
 
 ; CHECK-LABEL: ptr_to_tls:
 ; CHECK-NEXT: .functype ptr_to_tls () -> (i32)
-define i32* @ptr_to_tls() {
+define ptr @ptr_to_tls() {
   ; TLS-DAG: global.get __tls_base
   ; TLS-DAG: i32.const tls@TLSREL
   ; TLS-NEXT: i32.add
@@ -46,7 +46,7 @@ define i32* @ptr_to_tls() {
 
   ; NO-TLS-NEXT: i32.const tls
   ; NO-TLS-NEXT: return
-  ret i32* @tls
+  ret ptr @tls
 }
 
 ; CHECK-LABEL: tls_load:
@@ -61,7 +61,7 @@ define i32 @tls_load() {
   ; NO-TLS-NEXT: i32.const 0
   ; NO-TLS-NEXT: i32.load tls
   ; NO-TLS-NEXT: return
-  %tmp = load i32, i32* @tls, align 4
+  %tmp = load i32, ptr @tls, align 4
   ret i32 %tmp
 }
 
@@ -77,7 +77,7 @@ define void @tls_store(i32 %x) {
   ; NO-TLS-NEXT: i32.const 0
   ; NO-TLS-NEXT: i32.store tls
   ; NO-TLS-NEXT: return
-  store i32 %x, i32* @tls, align 4
+  store i32 %x, ptr @tls, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/WebAssembly/umulo-i64.ll b/llvm/test/CodeGen/WebAssembly/umulo-i64.ll
index efac514e9b56..dabe643e3228 100644
--- a/llvm/test/CodeGen/WebAssembly/umulo-i64.ll
+++ b/llvm/test/CodeGen/WebAssembly/umulo-i64.ll
@@ -9,7 +9,7 @@ define void @"_ZN4core3num21_$LT$impl$u20$u64$GT$15overflowing_mul17h07be88b4cba
 start:
   %2 = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %0, i64 %1)
   %3 = extractvalue { i64, i1 } %2, 0
-  store i64 %3, i64* undef
+  store i64 %3, ptr undef
   unreachable
 }
 

diff --git a/llvm/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll b/llvm/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll
index 418408bf92c5..9c638199bb6e 100644
--- a/llvm/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll
+++ b/llvm/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll
@@ -6,7 +6,7 @@
 target triple = "wasm32-unknown-unknown"
 
 declare i32 @has_i64_arg(i64)
-declare i32 @has_ptr_arg(i8*)
+declare i32 @has_ptr_arg(ptr)
 
 ; CHECK-LABEL: test_invalid_rtn:
 ; CHECK:      i32.const   $push[[L0:[0-9]+]]=, 0{{$}}
@@ -18,15 +18,15 @@ declare i32 @has_ptr_arg(i8*)
 ; CHECK-NEXT: end_function
 define void @test_invalid_rtn() {
 entry:
-  call i32 bitcast (i32 (i64)* @has_i64_arg to i32 (i32)*)(i32 0)
-  call [1 x i64] bitcast (i32 (i64)* @has_i64_arg to [1 x i64] (i64)*)(i64 0)
+  call i32 @has_i64_arg(i32 0)
+  call [1 x i64] @has_i64_arg(i64 0)
   ret void
 }
 
 ; CHECK-LABEL: test_struct_rtn:
 ; CHECK: 	call    	has_i64_arg, $pop6, $pop0
 define void @test_struct_rtn() {
-  call {i32, i32} bitcast (i32 (i64)* @has_i64_arg to {i32, i32} (i64)*)(i64 0)
+  call {i32, i32} @has_i64_arg(i64 0)
   ret void
 }
 
@@ -43,9 +43,9 @@ define void @test_struct_rtn() {
 ; CHECK-NEXT: 	end_function
 define void @test_invalid_arg() {
 entry:
-  call i32 bitcast (i32 (i8*)* @has_ptr_arg to i32 (i8)*)(i8 2)
-  call i32 bitcast (i32 (i8*)* @has_ptr_arg to i32 (i32)*)(i32 2)
-  call i32 bitcast (i32 (i8*)* @has_ptr_arg to i32 (i64)*)(i64 3)
+  call i32 @has_ptr_arg(i8 2)
+  call i32 @has_ptr_arg(i32 2)
+  call i32 @has_ptr_arg(i64 3)
   ret void
 }
 

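This file shows the most visible simplification of the migration: a call whose signature disagrees with the callee's declaration no longer needs a constant-expression bitcast, because a declared function is itself just a ptr constant and the call site supplies the function type. A minimal sketch of such a deliberately mismatched call (names are hypothetical, and actually executing it would be undefined behavior, which is exactly what these tests exercise):

    declare i32 @takes_i64(i64)

    define void @mismatched_call() {
    entry:
      ; Verifier-legal under opaque pointers: the call site alone
      ; determines the function type applied to the callee ptr.
      call i32 @takes_i64(i32 0)
      ret void
    }
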
diff --git a/llvm/test/CodeGen/WebAssembly/userstack.ll b/llvm/test/CodeGen/WebAssembly/userstack.ll
index dc36c466fb3e..61706dbca165 100644
--- a/llvm/test/CodeGen/WebAssembly/userstack.ll
+++ b/llvm/test/CodeGen/WebAssembly/userstack.ll
@@ -1,8 +1,8 @@
 ; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck -DPTR=32 %s
 ; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck -DPTR=64 %s
 
-declare void @ext_func(i64* %ptr)
-declare void @ext_func_i32(i32* %ptr)
+declare void @ext_func(ptr %ptr)
+declare void @ext_func_i32(ptr %ptr)
 
 ; CHECK: .globaltype	__stack_pointer, i[[PTR]]{{$}}
 
@@ -19,7 +19,7 @@ define void @alloca32() noredzone {
  ; CHECK: local.get $push[[L4:.+]]=, [[SP]]{{$}}
  ; CHECK: i32.const $push[[L0:.+]]=, 0
  ; CHECK: i32.store 12($pop[[L4]]), $pop[[L0]]
- store i32 0, i32* %retval
+ store i32 0, ptr %retval
  ; CHECK: local.get $push[[L6:.+]]=, [[SP]]{{$}}
  ; CHECK-NEXT: i[[PTR]].const $push[[L5:.+]]=, 16
  ; CHECK-NEXT: i[[PTR]].add $push[[L7:.+]]=, $pop[[L6]], $pop[[L5]]
@@ -36,8 +36,8 @@ define void @alloca3264() {
  ; CHECK-NEXT: local.tee $push[[L5:.+]]=, [[SP:.+]], $pop[[L6]]
  %r1 = alloca i32
  %r2 = alloca double
- store i32 0, i32* %r1
- store double 0.0, double* %r2
+ store i32 0, ptr %r1
+ store double 0.0, ptr %r2
  ; CHECK-NEXT: i64.const $push[[L1:.+]]=, 0
  ; CHECK-NEXT: i64.store 0($pop[[L5]]), $pop[[L1]]
  ; CHECK-NEXT: local.get $push[[L2:.+]]=, [[SP]]{{$}}
@@ -64,10 +64,9 @@ define void @allocarray() {
  ; CHECK-NEXT: local.get $push[[L4:.+]]=, 0{{$}}
  ; CHECK-NEXT: i32.const $push[[L10:.+]]=, 1{{$}}
  ; CHECK-NEXT: i32.store 12($pop[[L4]]), $pop[[L10]]{{$}}
- %p = getelementptr [33 x i32], [33 x i32]* %r, i32 0, i32 0
- store i32 1, i32* %p
- %p2 = getelementptr [33 x i32], [33 x i32]* %r, i32 0, i32 3
- store i32 1, i32* %p2
+ store i32 1, ptr %r
+ %p2 = getelementptr [33 x i32], ptr %r, i32 0, i32 3
+ store i32 1, ptr %p2
 
  ; CHECK-NEXT: local.get $push[[L2:.+]]=, [[SP]]{{$}}
  ; CHECK-NEXT: i[[PTR]].const $push[[L7:.+]]=, 144
@@ -77,7 +76,7 @@ define void @allocarray() {
 }
 
 ; CHECK-LABEL: non_mem_use
-define void @non_mem_use(i8** %addr) {
+define void @non_mem_use(ptr %addr) {
  ; CHECK: i[[PTR]].const $push[[L2:.+]]=, 48
  ; CHECK-NEXT: i[[PTR]].sub $push[[L12:.+]]=, {{.+}}, $pop[[L2]]
  ; CHECK-NEXT: local.tee $push[[L11:.+]]=, [[SP:.+]], $pop[[L12]]
@@ -90,19 +89,18 @@ define void @non_mem_use(i8** %addr) {
  ; CHECK: i[[PTR]].const $push[[OFF:.+]]=, 8
  ; CHECK-NEXT: i[[PTR]].add $push[[ARG1:.+]]=, $pop[[L3]], $pop[[OFF]]
  ; CHECK-NEXT: call ext_func, $pop[[ARG1]]
- call void @ext_func(i64* %r)
+ call void @ext_func(ptr %r)
  ; %r2 is at SP+0, no add needed
  ; CHECK: local.get $push[[L4:.+]]=, [[SP]]
  ; CHECK-NEXT: call ext_func, $pop[[L4]]
- call void @ext_func(i64* %r2)
+ call void @ext_func(ptr %r2)
  ; Use as a value, but in a store
  ; %buf is at SP+16
  ; CHECK: local.get $push[[L5:.+]]=, [[SP]]
  ; CHECK: i[[PTR]].const $push[[OFF:.+]]=, 16
  ; CHECK-NEXT: i[[PTR]].add $push[[VAL:.+]]=, $pop[[L5]], $pop[[OFF]]
  ; CHECK-NEXT: i[[PTR]].store 0($pop{{.+}}), $pop[[VAL]]
- %gep = getelementptr inbounds [27 x i8], [27 x i8]* %buf, i32 0, i32 0
- store i8* %gep, i8** %addr
+ store ptr %buf, ptr %addr
  ret void
 }
 
@@ -117,13 +115,12 @@ define void @allocarray_inbounds() {
  %r = alloca [5 x i32]
  ; CHECK: i32.const $push[[L3:.+]]=, 1
  ; CHECK-DAG: i32.store 24(${{.+}}), $pop[[L3]]
- %p = getelementptr inbounds [5 x i32], [5 x i32]* %r, i32 0, i32 0
- store i32 1, i32* %p
+ store i32 1, ptr %r
  ; This store should have both the GEP and the FI folded into it.
  ; CHECK-DAG: i32.store 12(${{.+}}), $pop
- %p2 = getelementptr inbounds [5 x i32], [5 x i32]* %r, i32 0, i32 3
- store i32 1, i32* %p2
- call void @ext_func(i64* null);
+ %p2 = getelementptr inbounds [5 x i32], ptr %r, i32 0, i32 3
+ store i32 1, ptr %p2
+ call void @ext_func(ptr null);
  ; CHECK: call ext_func
  ; CHECK: i[[PTR]].const $push[[L5:.+]]=, 32{{$}}
  ; CHECK-NEXT: i[[PTR]].add $push[[L7:.+]]=, ${{.+}}, $pop[[L5]]
@@ -142,7 +139,7 @@ define void @dynamic_alloca(i32 %alloc) {
  %r = alloca i32, i32 %alloc
  ; Target-independent codegen also calculates the store addr
  ; CHECK: call ext_func_i32
- call void @ext_func_i32(i32* %r)
+ call void @ext_func_i32(ptr %r)
  ; CHECK: global.set __stack_pointer, $pop{{.+}}
  ret void
 }
@@ -158,7 +155,7 @@ define void @dynamic_alloca_redzone(i32 %alloc) {
  ; CHECK: local.get $push[[L7:.+]]=, [[SP2]]{{$}}
  ; CHECK-NEXT: i32.const $push[[L6:.+]]=, 0{{$}}
  ; CHECK-NEXT: i32.store 0($pop[[L7]]), $pop[[L6]]{{$}}
- store i32 0, i32* %r
+ store i32 0, ptr %r
  ; CHECK-NEXT: return
  ret void
 }
@@ -178,7 +175,7 @@ define void @dynamic_static_alloca(i32 %alloc) noredzone {
  ; CHECK-NEXT: i32.const $push[[L0:.+]]=, 101
  ; CHECK-NEXT: i32.store [[static_offset:.+]]($pop[[pushedFP]]), $pop[[L0]]
  %static = alloca i32
- store volatile i32 101, i32* %static
+ store volatile i32 101, ptr %static
 
  ; Decrement SP in the body by the dynamic amount.
  ; CHECK: i[[PTR]].sub
@@ -197,8 +194,8 @@ define void @dynamic_static_alloca(i32 %alloc) noredzone {
  ; CHECK-NEXT: local.get $push[[L9:.+]]=, [[dynamic_local]]{{$}}
  ; CHECK-NEXT: i32.const $push[[L8:.+]]=, 103
  ; CHECK-NEXT: i32.store 0($pop[[L9]]), $pop[[L8]]
- store volatile i32 102, i32* %static
- store volatile i32 103, i32* %dynamic
+ store volatile i32 102, ptr %static
+ store volatile i32 103, ptr %dynamic
 
  ; Decrement SP in the body by the dynamic amount.
  ; CHECK: i[[PTR]].sub
@@ -218,9 +215,9 @@ define void @dynamic_static_alloca(i32 %alloc) noredzone {
  ; CHECK-NEXT: local.get $push[[L23:.+]]=, [[dynamic2_local]]
  ; CHECK-NEXT: i32.const $push[[L11:.+]]=, 106
  ; CHECK-NEXT: i32.store 0($pop[[L23]]), $pop[[L11]]
- store volatile i32 104, i32* %static
- store volatile i32 105, i32* %dynamic
- store volatile i32 106, i32* %dynamic.2
+ store volatile i32 104, ptr %static
+ store volatile i32 105, ptr %dynamic
+ store volatile i32 106, ptr %dynamic.2
 
  ; Writeback to memory.
  ; CHECK: local.get $push[[L24:.+]]=, [[FP]]{{$}}
@@ -230,15 +227,15 @@ define void @dynamic_static_alloca(i32 %alloc) noredzone {
  ret void
 }
 
-declare i8* @llvm.stacksave()
-declare void @llvm.stackrestore(i8*)
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)
 
 ; CHECK-LABEL: llvm_stack_builtins:
 define void @llvm_stack_builtins(i32 %alloc) noredzone {
  ; CHECK: global.get $push[[L11:.+]]=, __stack_pointer{{$}}
  ; CHECK-NEXT: local.tee $push[[L10:.+]]=, {{.+}}, $pop[[L11]]
  ; CHECK-NEXT: local.set [[STACK:.+]], $pop[[L10]]
- %stack = call i8* @llvm.stacksave()
+ %stack = call ptr @llvm.stacksave()
 
  ; Ensure we don't reassign the stacksave local
  ; CHECK-NOT: local.set [[STACK]],
@@ -246,7 +243,7 @@ define void @llvm_stack_builtins(i32 %alloc) noredzone {
 
  ; CHECK: local.get $push[[L12:.+]]=, [[STACK]]
  ; CHECK-NEXT: global.set __stack_pointer, $pop[[L12]]
- call void @llvm.stackrestore(i8* %stack)
+ call void @llvm.stackrestore(ptr %stack)
 
  ret void
 }
@@ -271,7 +268,7 @@ define void @dynamic_alloca_nouse(i32 %alloc) noredzone {
 ; The use of the alloca in a phi causes a CopyToReg DAG node to be generated,
 ; which has to have special handling because CopyToReg can't have a FI operand
 ; CHECK-LABEL: copytoreg_fi:
-define void @copytoreg_fi(i1 %cond, i32* %b) {
+define void @copytoreg_fi(i1 %cond, ptr %b) {
 entry:
  ; CHECK: i[[PTR]].const $push[[L1:.+]]=, 16
  ; CHECK-NEXT: i[[PTR]].sub $push[[L3:.+]]=, {{.+}}, $pop[[L1]]
@@ -281,8 +278,8 @@ entry:
  ; CHECK-NEXT: local.set [[COPY:.+]], $pop[[ADDR]]
  br label %body
 body:
- %a = phi i32* [%addr, %entry], [%b, %body]
- store i32 1, i32* %a
+ %a = phi ptr [%addr, %entry], [%b, %body]
+ store i32 1, ptr %a
  ; CHECK: local.get $push[[L12:.+]]=, [[COPY]]
  ; CHECK: i32.store 0($pop[[L12]]),
  br i1 %cond, label %body, label %exit
@@ -290,8 +287,8 @@ exit:
  ret void
 }
 
-declare void @use_i8_star(i8*)
-declare i8* @llvm.frameaddress(i32)
+declare void @use_i8_star(ptr)
+declare ptr @llvm.frameaddress(i32)
 
 ; Test __builtin_frame_address(0).
 ; CHECK-LABEL: frameaddress_0:
@@ -301,8 +298,8 @@ declare i8* @llvm.frameaddress(i32)
 ; CHECK-NEXT: local.get $push[[L5:.+]]=, [[FP]]
 ; CHECK-NEXT: global.set __stack_pointer, $pop[[L5]]
 define void @frameaddress_0() {
-  %t = call i8* @llvm.frameaddress(i32 0)
-  call void @use_i8_star(i8* %t)
+  %t = call ptr @llvm.frameaddress(i32 0)
+  call void @use_i8_star(ptr %t)
   ret void
 }
 
@@ -313,8 +310,8 @@ define void @frameaddress_0() {
 ; CHECK-NEXT: call use_i8_star, $pop0{{$}}
 ; CHECK-NEXT: return{{$}}
 define void @frameaddress_1() {
-  %t = call i8* @llvm.frameaddress(i32 1)
-  call void @use_i8_star(i8* %t)
+  %t = call ptr @llvm.frameaddress(i32 1)
+  call void @use_i8_star(ptr %t)
   ret void
 }
 
@@ -326,7 +323,7 @@ define void @frameaddress_1() {
 ; CHECK-NEXT:  #NO_APP
 define void @inline_asm() {
   %tmp = alloca i8
-  call void asm sideeffect "# %0", "r"(i8* %tmp)
+  call void asm sideeffect "# %0", "r"(ptr %tmp)
   ret void
 }
 
@@ -338,10 +335,10 @@ define void @inline_asm() {
 @str = local_unnamed_addr global [3 x i8] c"abc", align 16
 define i8 @frame_offset_with_global_address() {
   %1 = alloca i8, align 4
-  %2 = ptrtoint i8* %1 to i32
+  %2 = ptrtoint ptr %1 to i32
   ;; Here @str is a global address and not an immediate, so cannot be folded
-  %3 = getelementptr [3 x i8], [3 x i8]* @str, i32 0, i32 %2
-  %4 = load i8, i8* %3, align 8
+  %3 = getelementptr [3 x i8], ptr @str, i32 0, i32 %2
+  %4 = load i8, ptr %3, align 8
   %5 = and i8 %4, 67
   ret i8 %5
 }

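Several hunks above drop getelementptr instructions outright: a GEP whose indices are all zero computes exactly its base address, so under opaque pointers it folds away and the base pointer is used directly, while a GEP with a nonzero index keeps its source element type. Sketch:

    define void @gep_zero_folds(ptr %r) {
    entry:
      ; Was: %p = getelementptr [33 x i32], ptr %r, i32 0, i32 0
      ; which equals %r, so the store uses %r directly.
      store i32 1, ptr %r
      ; A nonzero index still needs the GEP and its element type.
      %p2 = getelementptr [33 x i32], ptr %r, i32 0, i32 3
      store i32 1, ptr %p2
      ret void
    }
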
diff --git a/llvm/test/CodeGen/WebAssembly/varargs.ll b/llvm/test/CodeGen/WebAssembly/varargs.ll
index cb582a4a58f5..2944936192b8 100644
--- a/llvm/test/CodeGen/WebAssembly/varargs.ll
+++ b/llvm/test/CodeGen/WebAssembly/varargs.ll
@@ -11,12 +11,11 @@ target triple = "wasm32-unknown-emscripten"
 ; CHECK-LABEL: start:
 ; CHECK-NEXT: .functype start (i32, i32) -> ()
 ; CHECK-NOT: __stack_pointer
-define void @start(i8** %ap, ...) {
+define void @start(ptr %ap, ...) {
 entry:
-  %0 = bitcast i8** %ap to i8*
 ; Store the second argument (the hidden vararg buffer pointer) into ap
 ; CHECK: i32.store 0($0), $1
-  call void @llvm.va_start(i8* %0)
+  call void @llvm.va_start(ptr %ap)
   ret void
 }
 
@@ -25,10 +24,9 @@ entry:
 ; CHECK-LABEL: end:
 ; CHECK-NEXT: .functype end (i32) -> (){{$}}
 ; CHECK-NEXT: return{{$}}
-define void @end(i8** %ap) {
+define void @end(ptr %ap) {
 entry:
-  %0 = bitcast i8** %ap to i8*
-  call void @llvm.va_end(i8* %0)
+  call void @llvm.va_end(ptr %ap)
   ret void
 }
 
@@ -39,11 +37,9 @@ entry:
 ; CHECK-NEXT: i32.load  $push0=, 0($1){{$}}
 ; CHECK-NEXT: i32.store 0($0), $pop0{{$}}
 ; CHECK-NEXT: return{{$}}
-define void @copy(i8** %ap, i8** %bp) {
+define void @copy(ptr %ap, ptr %bp) {
 entry:
-  %0 = bitcast i8** %ap to i8*
-  %1 = bitcast i8** %bp to i8*
-  call void @llvm.va_copy(i8* %0, i8* %1)
+  call void @llvm.va_copy(ptr %ap, ptr %bp)
   ret void
 }
 
@@ -58,9 +54,9 @@ entry:
 ; CHECK-NEXT: i32.store  0($0), $pop[[NUM3]]{{$}}
 ; CHECK-NEXT: i32.load   $push[[NUM4:[0-9]+]]=, 0($1){{$}}
 ; CHECK-NEXT: return     $pop[[NUM4]]{{$}}
-define i8 @arg_i8(i8** %ap) {
+define i8 @arg_i8(ptr %ap) {
 entry:
-  %t = va_arg i8** %ap, i8
+  %t = va_arg ptr %ap, i8
   ret i8 %t
 }
 
@@ -79,9 +75,9 @@ entry:
 ; CHECK-NEXT: i32.store  0($0), $pop[[NUM7]]{{$}}
 ; CHECK-NEXT: i32.load   $push[[NUM8:[0-9]+]]=, 0($1){{$}}
 ; CHECK-NEXT: return     $pop[[NUM8]]{{$}}
-define i32 @arg_i32(i8** %ap) {
+define i32 @arg_i32(ptr %ap) {
 entry:
-  %t = va_arg i8** %ap, i32
+  %t = va_arg ptr %ap, i32
   ret i32 %t
 }
 
@@ -93,9 +89,9 @@ entry:
 ; CHECK: i64.load
 ; CHECK: i64.load
 ; CHECK: return{{$}}
-define i128 @arg_i128(i8** %ap) {
+define i128 @arg_i128(ptr %ap) {
 entry:
-  %t = va_arg i8** %ap, i128
+  %t = va_arg ptr %ap, i128
   ret i128 %t
 }
 
@@ -128,16 +124,15 @@ define void @caller_some() {
 ; Test a va_start call in a non-entry block
 ; CHECK-LABEL: startbb:
 ; CHECK: .functype startbb (i32, i32, i32) -> ()
-define void @startbb(i1 %cond, i8** %ap, ...) {
+define void @startbb(i1 %cond, ptr %ap, ...) {
 entry:
   br i1 %cond, label %bb0, label %bb1
 bb0:
   ret void
 bb1:
-  %0 = bitcast i8** %ap to i8*
 ; Store the second argument (the hidden vararg buffer pointer) into ap
 ; CHECK: i32.store 0($1), $2
-  call void @llvm.va_start(i8* %0)
+  call void @llvm.va_start(ptr %ap)
   ret void
 }
 
@@ -200,12 +195,12 @@ define void @nonlegal_fixed(fp128 %x, ...) nounwind {
 ; UNKNOWN-NEXT: i32.const       $push6=, 7
 ; UNKNOWN-NEXT: i32.store       0($1), $pop6
 ; UNKNOWN-NEXT: call            callee, $1
-define void @call_fp128_alignment(i8* %p) {
+define void @call_fp128_alignment(ptr %p) {
 entry:
   call void (...) @callee(i8 7, fp128 0xL00000000000000018000000000000000)
   ret void
 }
 
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_end(i8*)
-declare void @llvm.va_copy(i8*, i8*)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)
+declare void @llvm.va_copy(ptr, ptr)

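The varargs changes apply the same rule to intrinsics: llvm.va_start, llvm.va_end, and llvm.va_copy take a ptr directly, so the i8** to i8* bitcasts that used to feed them disappear. A minimal sketch (function name is hypothetical):

    declare void @llvm.va_start(ptr)
    declare void @llvm.va_end(ptr)

    define void @use_valist(ptr %ap, ...) {
    entry:
      call void @llvm.va_start(ptr %ap)
      ; Arguments would be read here with: va_arg ptr %ap, <type>
      call void @llvm.va_end(ptr %ap)
      ret void
    }
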
diff --git a/llvm/test/CodeGen/WebAssembly/vector-sdiv.ll b/llvm/test/CodeGen/WebAssembly/vector-sdiv.ll
index 4739b3c5c376..6c1fb1ddddad 100644
--- a/llvm/test/CodeGen/WebAssembly/vector-sdiv.ll
+++ b/llvm/test/CodeGen/WebAssembly/vector-sdiv.ll
@@ -14,10 +14,10 @@ target triple = "wasm32-unknown-unknown"
 ; CHECK-DAG:  i32.store
 ; CHECK-DAG:  i32.shr_u
 ; CHECK-DAG:  i32.store
-define void @vector_sdiv(<4 x i32>* %x, <4 x i32>* readonly %y) {
+define void @vector_sdiv(ptr %x, ptr readonly %y) {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %y, align 16
+  %0 = load <4 x i32>, ptr %y, align 16
   %div = sdiv <4 x i32> %0, <i32 1, i32 4, i32 2, i32 8>
-  store <4 x i32> %div, <4 x i32>* %x, align 16
+  store <4 x i32> %div, ptr %x, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/WebAssembly/vtable.ll b/llvm/test/CodeGen/WebAssembly/vtable.ll
index 644d864a3bc9..58a6e00ddb2f 100644
--- a/llvm/test/CodeGen/WebAssembly/vtable.ll
+++ b/llvm/test/CodeGen/WebAssembly/vtable.ll
@@ -13,13 +13,13 @@
 
 target triple = "wasm32-unknown-unknown"
 
-%struct.A = type { i32 (...)** }
+%struct.A = type { ptr }
 %struct.B = type { %struct.A }
 %struct.C = type { %struct.A }
 %struct.D = type { %struct.B }
 
-@_ZTVN10__cxxabiv117__class_type_infoE = external global i8*
-@_ZTVN10__cxxabiv120__si_class_type_infoE = external global i8*
+@_ZTVN10__cxxabiv117__class_type_infoE = external global ptr
+@_ZTVN10__cxxabiv120__si_class_type_infoE = external global ptr
 
 ; TYPEINFONAME-LABEL: _ZTS1A:
 ; TYPEINFONAME-NEXT: .asciz "1A"
@@ -44,7 +44,7 @@ target triple = "wasm32-unknown-unknown"
 ; VTABLE-NEXT:  .int32 _ZN1AD0Ev
 ; VTABLE-NEXT:  .int32 _ZN1A3fooEv
 ; VTABLE-NEXT:  .size _ZTV1A, 20
-@_ZTV1A = constant [5 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI1A to i8*), i8* bitcast (%struct.A* (%struct.A*)* @_ZN1AD2Ev to i8*), i8* bitcast (void (%struct.A*)* @_ZN1AD0Ev to i8*), i8* bitcast (void (%struct.A*)* @_ZN1A3fooEv to i8*)], align 4
+@_ZTV1A = constant [5 x ptr] [ptr null, ptr @_ZTI1A, ptr @_ZN1AD2Ev, ptr @_ZN1AD0Ev, ptr @_ZN1A3fooEv], align 4
 ; VTABLE:       .type _ZTV1B,@object
 ; VTABLE-NEXT:  .section .rodata._ZTV1B,
 ; VTABLE-NEXT:  .globl _ZTV1B
@@ -55,7 +55,7 @@ target triple = "wasm32-unknown-unknown"
 ; VTABLE-NEXT:  .int32 _ZN1BD0Ev
 ; VTABLE-NEXT:  .int32 _ZN1B3fooEv
 ; VTABLE-NEXT:  .size _ZTV1B, 20
-@_ZTV1B = constant [5 x i8*] [i8* null, i8* bitcast ({ i8*, i8*, i8* }* @_ZTI1B to i8*), i8* bitcast (%struct.A* (%struct.A*)* @_ZN1AD2Ev to i8*), i8* bitcast (void (%struct.B*)* @_ZN1BD0Ev to i8*), i8* bitcast (void (%struct.B*)* @_ZN1B3fooEv to i8*)], align 4
+@_ZTV1B = constant [5 x ptr] [ptr null, ptr @_ZTI1B, ptr @_ZN1AD2Ev, ptr @_ZN1BD0Ev, ptr @_ZN1B3fooEv], align 4
 ; VTABLE:       .type _ZTV1C,@object
 ; VTABLE-NEXT:  .section .rodata._ZTV1C,
 ; VTABLE-NEXT:  .globl _ZTV1C
@@ -66,7 +66,7 @@ target triple = "wasm32-unknown-unknown"
 ; VTABLE-NEXT:  .int32 _ZN1CD0Ev
 ; VTABLE-NEXT:  .int32 _ZN1C3fooEv
 ; VTABLE-NEXT:  .size _ZTV1C, 20
-@_ZTV1C = constant [5 x i8*] [i8* null, i8* bitcast ({ i8*, i8*, i8* }* @_ZTI1C to i8*), i8* bitcast (%struct.A* (%struct.A*)* @_ZN1AD2Ev to i8*), i8* bitcast (void (%struct.C*)* @_ZN1CD0Ev to i8*), i8* bitcast (void (%struct.C*)* @_ZN1C3fooEv to i8*)], align 4
+@_ZTV1C = constant [5 x ptr] [ptr null, ptr @_ZTI1C, ptr @_ZN1AD2Ev, ptr @_ZN1CD0Ev, ptr @_ZN1C3fooEv], align 4
 ; VTABLE:       .type _ZTV1D,@object
 ; VTABLE-NEXT:  .section .rodata._ZTV1D,
 ; VTABLE-NEXT:  .globl _ZTV1D
@@ -77,7 +77,7 @@ target triple = "wasm32-unknown-unknown"
 ; VTABLE-NEXT:  .int32 _ZN1DD0Ev
 ; VTABLE-NEXT:  .int32 _ZN1D3fooEv
 ; VTABLE-NEXT:  .size _ZTV1D, 20
-@_ZTV1D = constant [5 x i8*] [i8* null, i8* bitcast ({ i8*, i8*, i8* }* @_ZTI1D to i8*), i8* bitcast (%struct.A* (%struct.A*)* @_ZN1AD2Ev to i8*), i8* bitcast (void (%struct.D*)* @_ZN1DD0Ev to i8*), i8* bitcast (void (%struct.D*)* @_ZN1D3fooEv to i8*)], align 4
+@_ZTV1D = constant [5 x ptr] [ptr null, ptr @_ZTI1D, ptr @_ZN1AD2Ev, ptr @_ZN1DD0Ev, ptr @_ZN1D3fooEv], align 4
 
 ; TYPEINFO:       .type _ZTI1A,@object
 ; TYPEINFO:       .globl _ZTI1A
@@ -85,7 +85,7 @@ target triple = "wasm32-unknown-unknown"
 ; TYPEINFO-NEXT:  .int32 _ZTVN10__cxxabiv117__class_type_infoE+8
 ; TYPEINFO-NEXT:  .int32 _ZTS1A
 ; TYPEINFO-NEXT:  .size _ZTI1A, 8
-@_ZTI1A = constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv117__class_type_infoE, i32 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @_ZTS1A, i32 0, i32 0) }
+@_ZTI1A = constant { ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv117__class_type_infoE, i32 2), ptr @_ZTS1A }
 ; TYPEINFO:       .type _ZTI1B,@object
 ; TYPEINFO:       .globl _ZTI1B
 ; TYPEINFO-LABEL: _ZTI1B:
@@ -93,7 +93,7 @@ target triple = "wasm32-unknown-unknown"
 ; TYPEINFO-NEXT:  .int32 _ZTS1B
 ; TYPEINFO-NEXT:  .int32 _ZTI1A
 ; TYPEINFO-NEXT:  .size _ZTI1B, 12
-@_ZTI1B = constant { i8*, i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv120__si_class_type_infoE, i32 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @_ZTS1B, i32 0, i32 0), i8* bitcast ({ i8*, i8* }* @_ZTI1A to i8*) }
+@_ZTI1B = constant { ptr, ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv120__si_class_type_infoE, i32 2), ptr @_ZTS1B, ptr @_ZTI1A }
 ; TYPEINFO:       .type _ZTI1C,@object
 ; TYPEINFO:       .globl _ZTI1C
 ; TYPEINFO-LABEL: _ZTI1C:
@@ -101,7 +101,7 @@ target triple = "wasm32-unknown-unknown"
 ; TYPEINFO-NEXT:  .int32 _ZTS1C
 ; TYPEINFO-NEXT:  .int32 _ZTI1A
 ; TYPEINFO-NEXT:  .size _ZTI1C, 12
-@_ZTI1C = constant { i8*, i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv120__si_class_type_infoE, i32 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @_ZTS1C, i32 0, i32 0), i8* bitcast ({ i8*, i8* }* @_ZTI1A to i8*) }
+@_ZTI1C = constant { ptr, ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv120__si_class_type_infoE, i32 2), ptr @_ZTS1C, ptr @_ZTI1A }
 ; TYPEINFO:       .type _ZTI1D,@object
 ; TYPEINFO:       .globl _ZTI1D
 ; TYPEINFO-LABEL: _ZTI1D:
@@ -109,65 +109,61 @@ target triple = "wasm32-unknown-unknown"
 ; TYPEINFO-NEXT:  .int32 _ZTS1D
 ; TYPEINFO-NEXT:  .int32 _ZTI1B
 ; TYPEINFO-NEXT:  .size _ZTI1D, 12
-@_ZTI1D = constant { i8*, i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv120__si_class_type_infoE, i32 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @_ZTS1D, i32 0, i32 0), i8* bitcast ({ i8*, i8*, i8* }* @_ZTI1B to i8*) }
+@_ZTI1D = constant { ptr, ptr, ptr } { ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv120__si_class_type_infoE, i32 2), ptr @_ZTS1D, ptr @_ZTI1B }
 
 @g = global i32 0, align 4
 
-define void @_ZN1A3fooEv(%struct.A* %this) {
+define void @_ZN1A3fooEv(ptr %this) {
 entry:
-  store i32 2, i32* @g, align 4
+  store i32 2, ptr @g, align 4
   ret void
 }
 
-define void @_ZN1B3fooEv(%struct.B* %this) {
+define void @_ZN1B3fooEv(ptr %this) {
 entry:
-  store i32 4, i32* @g, align 4
+  store i32 4, ptr @g, align 4
   ret void
 }
 
-define void @_ZN1C3fooEv(%struct.C* %this) {
+define void @_ZN1C3fooEv(ptr %this) {
 entry:
-  store i32 6, i32* @g, align 4
+  store i32 6, ptr @g, align 4
   ret void
 }
 
-define void @_ZN1D3fooEv(%struct.D* %this) {
+define void @_ZN1D3fooEv(ptr %this) {
 entry:
-  store i32 8, i32* @g, align 4
+  store i32 8, ptr @g, align 4
   ret void
 }
 
-define linkonce_odr void @_ZN1AD0Ev(%struct.A* %this) {
+define linkonce_odr void @_ZN1AD0Ev(ptr %this) {
 entry:
-  %0 = bitcast %struct.A* %this to i8*
-  tail call void @_ZdlPv(i8* %0)
+  tail call void @_ZdlPv(ptr %this)
   ret void
 }
 
-define linkonce_odr void @_ZN1BD0Ev(%struct.B* %this) {
+define linkonce_odr void @_ZN1BD0Ev(ptr %this) {
 entry:
-  %0 = bitcast %struct.B* %this to i8*
-  tail call void @_ZdlPv(i8* %0)
+  tail call void @_ZdlPv(ptr %this)
   ret void
 }
 
-define linkonce_odr void @_ZN1CD0Ev(%struct.C* %this) {
+define linkonce_odr void @_ZN1CD0Ev(ptr %this) {
 entry:
-  %0 = bitcast %struct.C* %this to i8*
-  tail call void @_ZdlPv(i8* %0)
+  tail call void @_ZdlPv(ptr %this)
   ret void
 }
 
-define linkonce_odr %struct.A* @_ZN1AD2Ev(%struct.A* returned %this) {
+define linkonce_odr ptr @_ZN1AD2Ev(ptr returned %this) {
 entry:
-  ret %struct.A* %this
+  ret ptr %this
 }
 
-define linkonce_odr void @_ZN1DD0Ev(%struct.D* %this) {
+define linkonce_odr void @_ZN1DD0Ev(ptr %this) {
 entry:
-  %0 = bitcast %struct.D* %this to i8*
-  tail call void @_ZdlPv(i8* %0)
+  tail call void @_ZdlPv(ptr %this)
   ret void
 }
 
-declare void @_ZdlPv(i8*)
+declare void @_ZdlPv(ptr)

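The vtable and RTTI globals shrink the most: every i8* bitcast wrapper and every zero-index GEP in the initializers goes away, leaving bare symbol references, and only GEPs with a real offset survive, now written with ptr as the source element type. A compressed sketch of the resulting shape, with hypothetical symbols:

    @rtti_base = external global ptr
    declare void @dtor_sketch(ptr)

    ; The offset-2 GEP is kept (stride = pointer size); the rest are
    ; bare symbol references.
    @vtable_sketch = constant [3 x ptr] [
      ptr null,
      ptr getelementptr inbounds (ptr, ptr @rtti_base, i32 2),
      ptr @dtor_sketch
    ], align 4
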
diff --git a/llvm/test/CodeGen/WebAssembly/wasm-eh-em-sjlj-error.ll b/llvm/test/CodeGen/WebAssembly/wasm-eh-em-sjlj-error.ll
index 21959af25ca5..35637a996e3a 100644
--- a/llvm/test/CodeGen/WebAssembly/wasm-eh-em-sjlj-error.ll
+++ b/llvm/test/CodeGen/WebAssembly/wasm-eh-em-sjlj-error.ll
@@ -4,12 +4,10 @@ target triple = "wasm32-unknown-unknown"
 
 %struct.__jmp_buf_tag = type { [6 x i32], i32, [32 x i32] }
 
-define void @wasm_eh_emscripten_sjlj_error() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @wasm_eh_emscripten_sjlj_error() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   %buf = alloca [1 x %struct.__jmp_buf_tag], align 16
-  %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %buf, i32 0, i32 0
-  %call = call i32 @setjmp(%struct.__jmp_buf_tag* %arraydecay) #0
-  %arraydecay1 = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %buf, i32 0, i32 0
+  %call = call i32 @setjmp(ptr %buf) #0
   br label %bb
 
 bb:
@@ -20,10 +18,10 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr null]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i8* @__cxa_begin_catch(i8* %2) #2 [ "funclet"(token %1) ]
+  %4 = call ptr @__cxa_begin_catch(ptr %2) #2 [ "funclet"(token %1) ]
   call void @__cxa_end_catch() [ "funclet"(token %1) ]
   catchret from %1 to label %try.cont
 
@@ -34,17 +32,17 @@ try.cont:                                         ; preds = %entry, %catch.start
 declare void @foo()
 declare i32 @__gxx_wasm_personality_v0(...)
 ; Function Attrs: nounwind
-declare i8* @llvm.wasm.get.exception(token) #0
+declare ptr @llvm.wasm.get.exception(token) #0
 ; Function Attrs: nounwind
 declare i32 @llvm.wasm.get.ehselector(token) #0
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 declare void @__cxa_end_catch()
 ; Function Attrs: returns_twice
-declare i32 @setjmp(%struct.__jmp_buf_tag*) #0
+declare i32 @setjmp(ptr) #0
 ; Function Attrs: noreturn
-declare void @longjmp(%struct.__jmp_buf_tag*, i32) #1
-declare i8* @malloc(i32)
-declare void @free(i8*)
+declare void @longjmp(ptr, i32) #1
+declare ptr @malloc(i32)
+declare void @free(ptr)
 
 attributes #0 = { returns_twice }
 attributes #1 = { noreturn }

diff --git a/llvm/test/CodeGen/WebAssembly/wasm-eh-sjlj-setjmp-within-catch.ll b/llvm/test/CodeGen/WebAssembly/wasm-eh-sjlj-setjmp-within-catch.ll
index cc6ab50c986b..f0d622ad333c 100644
--- a/llvm/test/CodeGen/WebAssembly/wasm-eh-sjlj-setjmp-within-catch.ll
+++ b/llvm/test/CodeGen/WebAssembly/wasm-eh-sjlj-setjmp-within-catch.ll
@@ -4,9 +4,9 @@ target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
 %struct.__jmp_buf_tag = type { [6 x i32], i32, [32 x i32] }
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
 
-define void @setjmp_within_catch() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+define void @setjmp_within_catch() personality ptr @__gxx_wasm_personality_v0 {
 entry:
   %buf = alloca [1 x %struct.__jmp_buf_tag], align 16
   invoke void @foo()
@@ -16,21 +16,19 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch.start] unwind to caller
 
 catch.start:                                      ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* bitcast (i8** @_ZTIi to i8*)]
-  %2 = call i8* @llvm.wasm.get.exception(token %1)
+  %1 = catchpad within %0 [ptr @_ZTIi]
+  %2 = call ptr @llvm.wasm.get.exception(token %1)
   %3 = call i32 @llvm.wasm.get.ehselector(token %1)
-  %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #0
+  %4 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) #0
   %matches = icmp eq i32 %3, %4
   br i1 %matches, label %catch, label %rethrow
 
 catch:                                            ; preds = %catch.start
-  %5 = call i8* @__cxa_begin_catch(i8* %2) #0 [ "funclet"(token %1) ]
-  %6 = bitcast i8* %5 to i32*
-  %7 = load i32, i32* %6, align 4
-  %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %buf, i32 0, i32 0
+  %5 = call ptr @__cxa_begin_catch(ptr %2) #0 [ "funclet"(token %1) ]
+  %6 = load i32, ptr %5, align 4
 ; CHECK: LLVM ERROR: In function setjmp_within_catch: setjmp within a catch clause is not supported in Wasm EH
 ; CHECK-NEXT: %call = invoke i32 @setjmp
-  %call = invoke i32 @setjmp(%struct.__jmp_buf_tag* noundef %arraydecay) #2 [ "funclet"(token %1) ]
+  %call = invoke i32 @setjmp(ptr noundef %buf) #2 [ "funclet"(token %1) ]
           to label %invoke.cont1 unwind label %ehcleanup
 
 invoke.cont1:                                     ; preds = %catch
@@ -45,24 +43,24 @@ try.cont:                                         ; preds = %entry, %invoke.cont
   ret void
 
 ehcleanup:                                        ; preds = %catch
-  %8 = cleanuppad within %1 []
-  call void @__cxa_end_catch() #0 [ "funclet"(token %8) ]
-  cleanupret from %8 unwind to caller
+  %7 = cleanuppad within %1 []
+  call void @__cxa_end_catch() #0 [ "funclet"(token %7) ]
+  cleanupret from %7 unwind to caller
 }
 
 declare void @foo()
 declare i32 @__gxx_wasm_personality_v0(...)
 ; Function Attrs: nounwind
-declare i8* @llvm.wasm.get.exception(token) #0
+declare ptr @llvm.wasm.get.exception(token) #0
 ; Function Attrs: nounwind
 declare i32 @llvm.wasm.get.ehselector(token) #0
 ; Function Attrs: nounwind
-declare i32 @llvm.eh.typeid.for(i8*) #0
+declare i32 @llvm.eh.typeid.for(ptr) #0
 ; Function Attrs: noreturn
 declare void @llvm.wasm.rethrow() #1
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 ; Function Attrs: returns_twice
-declare i32 @setjmp(%struct.__jmp_buf_tag* noundef) #2
+declare i32 @setjmp(ptr noundef) #2
 declare void @__cxa_end_catch()
 
 attributes #0 = { nounwind }

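The personality clause shows the same effect at the function level: the personality operand is just a ptr, so the bitcast of the varargs-style declaration disappears. Sketch:

    declare i32 @__gxx_wasm_personality_v0(...)

    define void @with_personality() personality ptr @__gxx_wasm_personality_v0 {
    entry:
      ret void
    }
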
diff --git a/llvm/test/CodeGen/WebAssembly/weak.ll b/llvm/test/CodeGen/WebAssembly/weak.ll
index a1555b69b8cb..af4c674c6e49 100644
--- a/llvm/test/CodeGen/WebAssembly/weak.ll
+++ b/llvm/test/CodeGen/WebAssembly/weak.ll
@@ -17,7 +17,7 @@ define void @g() {
 ; CHECK: bar:
 ; CHECK:   .int32 foo
 ; CHECK:   .size bar, 4
-@bar = global i32* @foo
+@bar = global ptr @foo
 
 ; CHECK: .weak h
 declare extern_weak void @h()

