[llvm] SPIRV: Convert tests to opaque pointers (PR #174563)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 6 02:59:35 PST 2026


https://github.com/arsenm created https://github.com/llvm/llvm-project/pull/174563

None

From 72f9a5e62d333d18b898c9cd3de1e2f58345c631 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Tue, 6 Jan 2026 11:32:22 +0100
Subject: [PATCH] SPIRV: Convert tests to opaque pointers

---
 .../test/CodeGen/SPIRV/AtomicBuiltinsFloat.ll |  44 +-
 llvm/test/CodeGen/SPIRV/ComparePointers.ll    |  30 +-
 llvm/test/CodeGen/SPIRV/EnqueueEmptyKernel.ll |  24 +-
 llvm/test/CodeGen/SPIRV/ExecutionMode.ll      |  72 +-
 .../CodeGen/SPIRV/FOrdGreaterThanEqual_int.ll |   4 +-
 .../SpecConstants/bool-spirv-specconstant.ll  |  16 +-
 llvm/test/CodeGen/SPIRV/TruncToBool.ll        |   4 +-
 llvm/test/CodeGen/SPIRV/atomicrmw.ll          |  22 +-
 .../CodeGen/SPIRV/basic_int_types_spirvdis.ll |  24 +-
 .../CodeGen/SPIRV/branching/OpSwitch32.ll     |  26 +-
 .../CodeGen/SPIRV/branching/OpSwitch64.ll     |  34 +-
 .../CodeGen/SPIRV/branching/OpSwitchChar.ll   |  22 +-
 .../CodeGen/SPIRV/builtin_intrinsics_32.ll    |  42 +-
 .../CodeGen/SPIRV/builtin_intrinsics_64.ll    |  42 +-
 .../CodeGen/SPIRV/builtin_vars-decorate.ll    |  18 +-
 .../SPIRV/capability-Int64Atomics-store.ll    |   6 +-
 .../CodeGen/SPIRV/capability-Int64Atomics.ll  |   6 +-
 llvm/test/CodeGen/SPIRV/capability-kernel.ll  |  10 +-
 .../SPIRV/constant/global-constants.ll        |   6 +-
 .../SPIRV/debug-info/opname-filtering.ll      |   2 +-
 llvm/test/CodeGen/SPIRV/empty.ll              |   6 +-
 llvm/test/CodeGen/SPIRV/event_no_group_cap.ll |  10 +-
 .../atomicrmw_faddfsub_float.ll               |  10 +-
 .../IntelFPMaxErrorFPMath.ll                  |   6 +-
 .../extensions/SPV_KHR_bit_instructions.ll    |   4 +-
 .../SPV_KHR_float_controls2/decoration.ll     |  92 +-
 .../disabled-on-amd.ll                        |   4 +-
 .../SPIRV/function/alloca-load-store.ll       |  12 +-
 llvm/test/CodeGen/SPIRV/half_extension.ll     |  14 +-
 llvm/test/CodeGen/SPIRV/half_no_extension.ll  |  26 +-
 llvm/test/CodeGen/SPIRV/image-unoptimized.ll  |  28 +-
 .../test/CodeGen/SPIRV/instructions/atomic.ll |  36 +-
 .../SPIRV/instructions/atomic_acqrel.ll       |  36 +-
 .../CodeGen/SPIRV/instructions/atomic_seq.ll  |  36 +-
 .../CodeGen/SPIRV/instructions/bitwise-i1.ll  |  32 +-
 .../test/CodeGen/SPIRV/instructions/ptrcmp.ll |  40 +-
 .../CodeGen/SPIRV/linkage/linkage-types.ll    |  24 +-
 llvm/test/CodeGen/SPIRV/linked-list.ll        |   4 +-
 .../test/CodeGen/SPIRV/llvm-intrinsics/abs.ll |   4 +-
 .../CodeGen/SPIRV/llvm-intrinsics/assume.ll   |  34 +-
 .../llvm-intrinsics/bitreverse_small_type.ll  |  24 +-
 .../CodeGen/SPIRV/llvm-intrinsics/bswap.ll    |  26 +-
 .../CodeGen/SPIRV/llvm-intrinsics/ctpop.ll    |  10 +-
 .../CodeGen/SPIRV/llvm-intrinsics/expect.ll   |  68 +-
 .../llvm-intrinsics/fp-to-int-intrinsics.ll   |  32 +-
 .../SPIRV/llvm-intrinsics/invariant.ll        |  12 +-
 .../SPIRV/llvm-intrinsics/memcpy.align.ll     |  52 +-
 .../CodeGen/SPIRV/llvm-intrinsics/memmove.ll  |  40 +-
 .../CodeGen/SPIRV/llvm-intrinsics/memset.ll   |  32 +-
 .../CodeGen/SPIRV/llvm-intrinsics/sqrt.ll     |   8 +-
 llvm/test/CodeGen/SPIRV/lshr-constexpr.ll     |   4 +-
 llvm/test/CodeGen/SPIRV/multi_md.ll           |  34 +-
 .../SPIRV/opencl/basic/get_global_offset.ll   |  14 +-
 .../opencl/basic/progvar_prog_scope_init.ll   |  70 +-
 .../opencl/basic/progvar_prog_scope_uninit.ll |  92 +-
 .../opencl/device_execution/execute_block.ll  |  52 +-
 llvm/test/CodeGen/SPIRV/opencl/image.ll       |   4 +-
 .../pointers/two-bitcast-or-param-users.ll    |   4 +-
 .../SPIRV/pointers/two-subsequent-bitcasts.ll |   8 +-
 llvm/test/CodeGen/SPIRV/scoped_atomicrmw.ll   | 110 +--
 llvm/test/CodeGen/SPIRV/select-builtin.ll     |   4 +-
 llvm/test/CodeGen/SPIRV/simple.ll             | 122 +--
 llvm/test/CodeGen/SPIRV/sitofp-with-bool.ll   |   2 +-
 .../CodeGen/SPIRV/spec_const_decoration.ll    |  14 +-
 llvm/test/CodeGen/SPIRV/spirv-load-store.ll   |  10 +-
 llvm/test/CodeGen/SPIRV/spirv-tools-dis.ll    |  10 +-
 .../SPIRV/spirv_param_decorations_quals.ll    |   2 +-
 llvm/test/CodeGen/SPIRV/store.ll              |  10 +-
 .../AtomicCompareExchangeExplicit_cl20.ll     |  22 +-
 .../SPIRV/transcoding/BitReversePref.ll       |  18 +-
 .../CodeGen/SPIRV/transcoding/BuildNDRange.ll |   4 +-
 .../SPIRV/transcoding/BuildNDRange_2.ll       |  26 +-
 .../CodeGen/SPIRV/transcoding/ConvertPtr.ll   |  10 +-
 .../SPIRV/transcoding/DecorationAlignment.ll  |   2 +-
 .../transcoding/DecorationMaxByteOffset.ll    |   8 +-
 llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll |  20 +-
 .../SPIRV/transcoding/GlobalFunAnnotate.ll    |   2 +-
 .../CodeGen/SPIRV/transcoding/OpAllAny.ll     |   4 +-
 .../SPIRV/transcoding/OpConstantBool.ll       |  10 +-
 llvm/test/CodeGen/SPIRV/transcoding/OpDot.ll  |  12 +-
 .../SPIRV/transcoding/OpGroupAllAny.ll        |   2 +-
 .../SPIRV/transcoding/OpGroupAsyncCopy.ll     |  92 +-
 .../SPIRV/transcoding/OpImageQuerySize.ll     |  16 +-
 .../SPIRV/transcoding/OpImageReadMS.ll        |   6 +-
 .../transcoding/OpImageSampleExplicitLod.ll   |  14 +-
 .../OpPhi_ArgumentsPlaceholders.ll            |  12 +-
 .../transcoding/OpVectorExtractDynamic.ll     |   4 +-
 .../transcoding/OpVectorInsertDynamic_i16.ll  |   4 +-
 .../transcoding/OpenCL/atomic_cmpxchg.ll      |  10 +-
 .../SPIRV/transcoding/OpenCL/atomic_legacy.ll |  10 +-
 .../transcoding/OpenCL/sub_group_mask.ll      |   4 +-
 .../SPIRV/transcoding/RelationalOperators.ll  |  56 +-
 .../transcoding/RelationalOperatorsFUnord.ll  |  10 +-
 .../CodeGen/SPIRV/transcoding/SampledImage.ll |  16 +-
 .../transcoding/SpecConstantComposite.ll      |  30 +-
 .../CodeGen/SPIRV/transcoding/TransFNeg.ll    |  50 +-
 .../SPIRV/transcoding/atomic_load_store.ll    |  16 +-
 .../test/CodeGen/SPIRV/transcoding/bitcast.ll |  10 +-
 .../transcoding/block_w_struct_return.ll      |  90 +-
 .../CodeGen/SPIRV/transcoding/builtin_vars.ll |   6 +-
 .../transcoding/builtin_vars_arithmetics.ll   |  28 +-
 .../SPIRV/transcoding/builtin_vars_opt.ll     |  24 +-
 .../CodeGen/SPIRV/transcoding/clk_event_t.ll  |   8 +-
 .../SPIRV/transcoding/enqueue_kernel.ll       | 404 ++++-----
 .../SPIRV/transcoding/explicit-conversions.ll |  46 +-
 .../SPIRV/transcoding/extract_insert_value.ll |  16 +-
 llvm/test/CodeGen/SPIRV/transcoding/fadd.ll   |  30 +-
 llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll |   6 +-
 llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll   |  16 +-
 llvm/test/CodeGen/SPIRV/transcoding/fmul.ll   |  16 +-
 llvm/test/CodeGen/SPIRV/transcoding/fneg.ll   |  16 +-
 .../fp_contract_reassoc_fast_mode.ll          |  16 +-
 llvm/test/CodeGen/SPIRV/transcoding/frem.ll   |  16 +-
 llvm/test/CodeGen/SPIRV/transcoding/fsub.ll   |  16 +-
 .../CodeGen/SPIRV/transcoding/global_block.ll |  34 +-
 .../CodeGen/SPIRV/transcoding/group_ops.ll    |  62 +-
 .../test/CodeGen/SPIRV/transcoding/isequal.ll |  14 +-
 llvm/test/CodeGen/SPIRV/transcoding/ldexp.ll  |  18 +-
 .../SPIRV/transcoding/memory_access.ll        |  28 +-
 .../CodeGen/SPIRV/transcoding/readonly.ll     |   2 +-
 .../SPIRV/transcoding/relationals_double.ll   |   8 +-
 .../SPIRV/transcoding/relationals_float.ll    |   8 +-
 .../SPIRV/transcoding/relationals_half.ll     |   8 +-
 .../CodeGen/SPIRV/transcoding/spec_const.ll   |  18 +-
 .../spirv-private-array-initialization.ll     |  10 +-
 .../SPIRV/transcoding/sub_group_ballot.ll     |  48 +-
 .../transcoding/sub_group_clustered_reduce.ll | 284 +++---
 .../transcoding/sub_group_extended_types.ll   | 144 +--
 .../sub_group_non_uniform_arithmetic.ll       | 852 +++++++++---------
 .../transcoding/sub_group_non_uniform_vote.ll |  36 +-
 .../SPIRV/transcoding/sub_group_shuffle.ll    |  88 +-
 .../transcoding/sub_group_shuffle_relative.ll |  88 +-
 llvm/test/CodeGen/SPIRV/transcoding/vec8.ll   |   2 +-
 llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll   |   4 +-
 134 files changed, 2396 insertions(+), 2396 deletions(-)
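
The rewrite is mechanical throughout: every typed pointer in the test IR (e.g. i32 addrspace(1)*, float addrspace(4)*) becomes the opaque ptr form, keeping only the address space, and pointer-to-pointer types collapse to a plain ptr. A minimal sketch of the conversion on a hypothetical test (@example is illustrative, not taken from this patch):

; Before (typed pointers):
;   %p = alloca i32 addrspace(1)*, align 8
;   store i32 addrspace(1)* %v, i32 addrspace(1)** %p, align 8
;   %l = load i32 addrspace(1)*, i32 addrspace(1)** %p, align 8
; After (opaque pointers):
define spir_func void @example(ptr addrspace(1) %v) {
entry:
  ; the alloca lives in the default address space, so its type is plain ptr
  %p = alloca ptr addrspace(1), align 8
  ; stored/loaded values keep their address space; the pointee type is gone
  store ptr addrspace(1) %v, ptr %p, align 8
  %l = load ptr addrspace(1), ptr %p, align 8
  ret void
}

Load and store instructions already spell out their value type, so no information is lost; bitcasts between pointers of the same address space become trivial and would normally be dropped, though this patch only converts the syntax without otherwise cleaning up the tests.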

diff --git a/llvm/test/CodeGen/SPIRV/AtomicBuiltinsFloat.ll b/llvm/test/CodeGen/SPIRV/AtomicBuiltinsFloat.ll
index 88565082ef01e..345f21c260588 100644
--- a/llvm/test/CodeGen/SPIRV/AtomicBuiltinsFloat.ll
+++ b/llvm/test/CodeGen/SPIRV/AtomicBuiltinsFloat.ll
@@ -10,41 +10,41 @@
 ; CHECK-COUNT-3: OpAtomicLoad
 ; CHECK-COUNT-3: OpAtomicExchange
 
-define spir_kernel void @test_atomic_kernel(float addrspace(3)* %ff) local_unnamed_addr #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !5 !kernel_arg_base_type !6 !kernel_arg_type_qual !7 {
+define spir_kernel void @test_atomic_kernel(ptr addrspace(3) %ff) local_unnamed_addr #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !5 !kernel_arg_base_type !6 !kernel_arg_type_qual !7 {
 entry:
-  %0 = addrspacecast float addrspace(3)* %ff to float addrspace(4)*
-  tail call spir_func void @_Z11atomic_initPU3AS4VU7_Atomicff(float addrspace(4)* %0, float 1.000000e+00) #2
-  tail call spir_func void @_Z12atomic_storePU3AS4VU7_Atomicff(float addrspace(4)* %0, float 1.000000e+00) #2
-  tail call spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order(float addrspace(4)* %0, float 1.000000e+00, i32 0) #2
-  tail call spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(float addrspace(4)* %0, float 1.000000e+00, i32 0, i32 1) #2
-  %call = tail call spir_func float @_Z11atomic_loadPU3AS4VU7_Atomicf(float addrspace(4)* %0) #2
-  %call1 = tail call spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order(float addrspace(4)* %0, i32 0) #2
-  %call2 = tail call spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order12memory_scope(float addrspace(4)* %0, i32 0, i32 1) #2
-  %call3 = tail call spir_func float @_Z15atomic_exchangePU3AS4VU7_Atomicff(float addrspace(4)* %0, float 1.000000e+00) #2
-  %call4 = tail call spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order(float addrspace(4)* %0, float 1.000000e+00, i32 0) #2
-  %call5 = tail call spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(float addrspace(4)* %0, float 1.000000e+00, i32 0, i32 1) #2
+  %0 = addrspacecast ptr addrspace(3) %ff to ptr addrspace(4)
+  tail call spir_func void @_Z11atomic_initPU3AS4VU7_Atomicff(ptr addrspace(4) %0, float 1.000000e+00) #2
+  tail call spir_func void @_Z12atomic_storePU3AS4VU7_Atomicff(ptr addrspace(4) %0, float 1.000000e+00) #2
+  tail call spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order(ptr addrspace(4) %0, float 1.000000e+00, i32 0) #2
+  tail call spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(ptr addrspace(4) %0, float 1.000000e+00, i32 0, i32 1) #2
+  %call = tail call spir_func float @_Z11atomic_loadPU3AS4VU7_Atomicf(ptr addrspace(4) %0) #2
+  %call1 = tail call spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order(ptr addrspace(4) %0, i32 0) #2
+  %call2 = tail call spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order12memory_scope(ptr addrspace(4) %0, i32 0, i32 1) #2
+  %call3 = tail call spir_func float @_Z15atomic_exchangePU3AS4VU7_Atomicff(ptr addrspace(4) %0, float 1.000000e+00) #2
+  %call4 = tail call spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order(ptr addrspace(4) %0, float 1.000000e+00, i32 0) #2
+  %call5 = tail call spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(ptr addrspace(4) %0, float 1.000000e+00, i32 0, i32 1) #2
   ret void
 }
 
-declare spir_func void @_Z11atomic_initPU3AS4VU7_Atomicff(float addrspace(4)*, float)
+declare spir_func void @_Z11atomic_initPU3AS4VU7_Atomicff(ptr addrspace(4), float)
 
-declare spir_func void @_Z12atomic_storePU3AS4VU7_Atomicff(float addrspace(4)*, float)
+declare spir_func void @_Z12atomic_storePU3AS4VU7_Atomicff(ptr addrspace(4), float)
 
-declare spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order(float addrspace(4)*, float, i32)
+declare spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order(ptr addrspace(4), float, i32)
 
-declare spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(float addrspace(4)*, float, i32, i32)
+declare spir_func void @_Z21atomic_store_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(ptr addrspace(4), float, i32, i32)
 
-declare spir_func float @_Z11atomic_loadPU3AS4VU7_Atomicf(float addrspace(4)*)
+declare spir_func float @_Z11atomic_loadPU3AS4VU7_Atomicf(ptr addrspace(4))
 
-declare spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order(float addrspace(4)*, i32)
+declare spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order(ptr addrspace(4), i32)
 
-declare spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order12memory_scope(float addrspace(4)*, i32, i32)
+declare spir_func float @_Z20atomic_load_explicitPU3AS4VU7_Atomicf12memory_order12memory_scope(ptr addrspace(4), i32, i32)
 
-declare spir_func float @_Z15atomic_exchangePU3AS4VU7_Atomicff(float addrspace(4)*, float)
+declare spir_func float @_Z15atomic_exchangePU3AS4VU7_Atomicff(ptr addrspace(4), float)
 
-declare spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order(float addrspace(4)*, float, i32)
+declare spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order(ptr addrspace(4), float, i32)
 
-declare spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(float addrspace(4)*, float, i32, i32)
+declare spir_func float @_Z24atomic_exchange_explicitPU3AS4VU7_Atomicff12memory_order12memory_scope(ptr addrspace(4), float, i32, i32)
 
 !3 = !{i32 3}
 !4 = !{!"none"}
diff --git a/llvm/test/CodeGen/SPIRV/ComparePointers.ll b/llvm/test/CodeGen/SPIRV/ComparePointers.ll
index bc1514e145cb5..59c052f04915e 100644
--- a/llvm/test/CodeGen/SPIRV/ComparePointers.ll
+++ b/llvm/test/CodeGen/SPIRV/ComparePointers.ll
@@ -25,40 +25,40 @@
 ; CHECK-SPIRV: OpConvertPtrToU
 ; CHECK-SPIRV: OpULessThan
 
-define dso_local spir_kernel void @test(i32 addrspace(1)* noundef %in, i32 addrspace(1)* noundef %in2) {
+define dso_local spir_kernel void @test(ptr addrspace(1) noundef %in, ptr addrspace(1) noundef %in2) {
 entry:
-  %in.addr = alloca i32 addrspace(1)*, align 8
-  %in2.addr = alloca i32 addrspace(1)*, align 8
-  store i32 addrspace(1)* %in, i32 addrspace(1)** %in.addr, align 8
-  store i32 addrspace(1)* %in2, i32 addrspace(1)** %in2.addr, align 8
-  %0 = load i32 addrspace(1)*, i32 addrspace(1)** %in.addr, align 8
-  %tobool = icmp ne i32 addrspace(1)* %0, null
+  %in.addr = alloca ptr addrspace(1), align 8
+  %in2.addr = alloca ptr addrspace(1), align 8
+  store ptr addrspace(1) %in, ptr %in.addr, align 8
+  store ptr addrspace(1) %in2, ptr %in2.addr, align 8
+  %0 = load ptr addrspace(1), ptr %in.addr, align 8
+  %tobool = icmp ne ptr addrspace(1) %0, null
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
   br label %if.end8
 
 if.end:                                           ; preds = %entry
-  %1 = load i32 addrspace(1)*, i32 addrspace(1)** %in.addr, align 8
-  %cmp = icmp eq i32 addrspace(1)* %1, inttoptr (i64 1 to i32 addrspace(1)*)
+  %1 = load ptr addrspace(1), ptr %in.addr, align 8
+  %cmp = icmp eq ptr addrspace(1) %1, inttoptr (i64 1 to ptr addrspace(1))
   br i1 %cmp, label %if.then1, label %if.end2
 
 if.then1:                                         ; preds = %if.end
   br label %if.end8
 
 if.end2:                                          ; preds = %if.end
-  %2 = load i32 addrspace(1)*, i32 addrspace(1)** %in.addr, align 8
-  %3 = load i32 addrspace(1)*, i32 addrspace(1)** %in2.addr, align 8
-  %cmp3 = icmp ugt i32 addrspace(1)* %2, %3
+  %2 = load ptr addrspace(1), ptr %in.addr, align 8
+  %3 = load ptr addrspace(1), ptr %in2.addr, align 8
+  %cmp3 = icmp ugt ptr addrspace(1) %2, %3
   br i1 %cmp3, label %if.then4, label %if.end5
 
 if.then4:                                         ; preds = %if.end2
   br label %if.end8
 
 if.end5:                                          ; preds = %if.end2
-  %4 = load i32 addrspace(1)*, i32 addrspace(1)** %in.addr, align 8
-  %5 = load i32 addrspace(1)*, i32 addrspace(1)** %in2.addr, align 8
-  %cmp6 = icmp ult i32 addrspace(1)* %4, %5
+  %4 = load ptr addrspace(1), ptr %in.addr, align 8
+  %5 = load ptr addrspace(1), ptr %in2.addr, align 8
+  %cmp6 = icmp ult ptr addrspace(1) %4, %5
   br i1 %cmp6, label %if.then7, label %if.end8
 
 if.then7:                                         ; preds = %if.end5
diff --git a/llvm/test/CodeGen/SPIRV/EnqueueEmptyKernel.ll b/llvm/test/CodeGen/SPIRV/EnqueueEmptyKernel.ll
index a6f172a81dc7c..484f86a65880d 100644
--- a/llvm/test/CodeGen/SPIRV/EnqueueEmptyKernel.ll
+++ b/llvm/test/CodeGen/SPIRV/EnqueueEmptyKernel.ll
@@ -34,34 +34,34 @@
 define spir_kernel void @test_enqueue_empty() {
 entry:
   %tmp = alloca %struct.ndrange_t, align 8
-  %call = call spir_func %opencl.queue_t* @_Z17get_default_queuev()
-  call spir_func void @_Z10ndrange_1Dm(%struct.ndrange_t* sret(%struct.ndrange_t*) %tmp, i64 1)
-  %0 = call i32 @__enqueue_kernel_basic_events(%opencl.queue_t* %call, i32 1, %struct.ndrange_t* %tmp, i32 0, %opencl.clk_event_t* addrspace(4)* null, %opencl.clk_event_t* addrspace(4)* null, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*)* @__test_enqueue_empty_block_invoke_kernel to i8*) to i8 addrspace(4)*), i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32 } addrspace(1)* @__block_literal_global to i8 addrspace(1)*) to i8 addrspace(4)*))
+  %call = call spir_func ptr @_Z17get_default_queuev()
+  call spir_func void @_Z10ndrange_1Dm(ptr sret(ptr) %tmp, i64 1)
+  %0 = call i32 @__enqueue_kernel_basic_events(ptr %call, i32 1, ptr %tmp, i32 0, ptr addrspace(4) null, ptr addrspace(4) null, ptr addrspace(4) addrspacecast (ptr @__test_enqueue_empty_block_invoke_kernel to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr addrspace(1) @__block_literal_global to ptr addrspace(4)))
   ret void
 ; CHECK-SPIRV: %[[#Int8PtrBlock:]] = OpBitcast %[[#Int8Ptr]] %[[#Block]]
 ; CHECK-SPIRV: %[[#Int8PtrGenBlock:]] = OpPtrCastToGeneric %[[#Int8PtrGen]] %[[#Int8PtrBlock]]
 ; CHECK-SPIRV: %[[#]] = OpEnqueueKernel %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#Invoke:]] %[[#Int8PtrGenBlock]] %[[#]] %[[#]]
 }
 
-declare spir_func %opencl.queue_t* @_Z17get_default_queuev()
+declare spir_func ptr @_Z17get_default_queuev()
 
-declare spir_func void @_Z10ndrange_1Dm(%struct.ndrange_t* sret(%struct.ndrange_t*), i64)
+declare spir_func void @_Z10ndrange_1Dm(ptr sret(ptr), i64)
 
-define internal spir_func void @__test_enqueue_empty_block_invoke(i8 addrspace(4)* %.block_descriptor) {
+define internal spir_func void @__test_enqueue_empty_block_invoke(ptr addrspace(4) %.block_descriptor) {
 entry:
-  %.block_descriptor.addr = alloca i8 addrspace(4)*, align 8
-  store i8 addrspace(4)* %.block_descriptor, i8 addrspace(4)** %.block_descriptor.addr, align 8
-  %block = bitcast i8 addrspace(4)* %.block_descriptor to <{ i32, i32 }> addrspace(4)*
+  %.block_descriptor.addr = alloca ptr addrspace(4), align 8
+  store ptr addrspace(4) %.block_descriptor, ptr %.block_descriptor.addr, align 8
+  %block = bitcast ptr addrspace(4) %.block_descriptor to ptr addrspace(4)
   ret void
 }
 
-define internal spir_kernel void @__test_enqueue_empty_block_invoke_kernel(i8 addrspace(4)*) {
+define internal spir_kernel void @__test_enqueue_empty_block_invoke_kernel(ptr addrspace(4)) {
 entry:
-  call void @__test_enqueue_empty_block_invoke(i8 addrspace(4)* %0)
+  call void @__test_enqueue_empty_block_invoke(ptr addrspace(4) %0)
   ret void
 }
 
-declare i32 @__enqueue_kernel_basic_events(%opencl.queue_t*, i32, %struct.ndrange_t*, i32, %opencl.clk_event_t* addrspace(4)*, %opencl.clk_event_t* addrspace(4)*, i8 addrspace(4)*, i8 addrspace(4)*)
+declare i32 @__enqueue_kernel_basic_events(ptr, i32, ptr, i32, ptr addrspace(4), ptr addrspace(4), ptr addrspace(4), ptr addrspace(4))
 
 ; CHECK-SPIRV:      %[[#Invoke]] = OpFunction %[[#Void]] None %[[#]]
 ; CHECK-SPIRV-NEXT: %[[#]] = OpFunctionParameter %[[#Int8PtrGen]]
diff --git a/llvm/test/CodeGen/SPIRV/ExecutionMode.ll b/llvm/test/CodeGen/SPIRV/ExecutionMode.ll
index 180b7246952db..fb94ce75eea51 100644
--- a/llvm/test/CodeGen/SPIRV/ExecutionMode.ll
+++ b/llvm/test/CodeGen/SPIRV/ExecutionMode.ll
@@ -22,34 +22,34 @@
 
 define internal spir_func void @__cxx_global_var_init() {
 entry:
-  call spir_func void @_ZNU3AS416global_ctor_dtorC1Ei(%struct.global_ctor_dtor addrspace(4)* addrspacecast (%struct.global_ctor_dtor addrspace(1)* @g to %struct.global_ctor_dtor addrspace(4)*), i32 12)
+  call spir_func void @_ZNU3AS416global_ctor_dtorC1Ei(ptr addrspace(4) addrspacecast (ptr addrspace(1) @g to ptr addrspace(4)), i32 12)
   ret void
 }
 
-define linkonce_odr spir_func void @_ZNU3AS416global_ctor_dtorC1Ei(%struct.global_ctor_dtor addrspace(4)* %this, i32 %i) unnamed_addr align 2 {
+define linkonce_odr spir_func void @_ZNU3AS416global_ctor_dtorC1Ei(ptr addrspace(4) %this, i32 %i) unnamed_addr align 2 {
 entry:
-  %this.addr = alloca %struct.global_ctor_dtor addrspace(4)*, align 4
+  %this.addr = alloca ptr addrspace(4), align 4
   %i.addr = alloca i32, align 4
-  store %struct.global_ctor_dtor addrspace(4)* %this, %struct.global_ctor_dtor addrspace(4)** %this.addr, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %this1 = load %struct.global_ctor_dtor addrspace(4)*, %struct.global_ctor_dtor addrspace(4)** %this.addr
-  %0 = load i32, i32* %i.addr, align 4
-  call spir_func void @_ZNU3AS416global_ctor_dtorC2Ei(%struct.global_ctor_dtor addrspace(4)* %this1, i32 %0)
+  store ptr addrspace(4) %this, ptr %this.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %this1 = load ptr addrspace(4), ptr %this.addr
+  %0 = load i32, ptr %i.addr, align 4
+  call spir_func void @_ZNU3AS416global_ctor_dtorC2Ei(ptr addrspace(4) %this1, i32 %0)
   ret void
 }
 
-define linkonce_odr spir_func void @_ZNU3AS416global_ctor_dtorD1Ev(%struct.global_ctor_dtor addrspace(4)* %this) unnamed_addr align 2 {
+define linkonce_odr spir_func void @_ZNU3AS416global_ctor_dtorD1Ev(ptr addrspace(4) %this) unnamed_addr align 2 {
 entry:
-  %this.addr = alloca %struct.global_ctor_dtor addrspace(4)*, align 4
-  store %struct.global_ctor_dtor addrspace(4)* %this, %struct.global_ctor_dtor addrspace(4)** %this.addr, align 4
-  %this1 = load %struct.global_ctor_dtor addrspace(4)*, %struct.global_ctor_dtor addrspace(4)** %this.addr
-  call spir_func void @_ZNU3AS416global_ctor_dtorD2Ev(%struct.global_ctor_dtor addrspace(4)* %this1)
+  %this.addr = alloca ptr addrspace(4), align 4
+  store ptr addrspace(4) %this, ptr %this.addr, align 4
+  %this1 = load ptr addrspace(4), ptr %this.addr
+  call spir_func void @_ZNU3AS416global_ctor_dtorD2Ev(ptr addrspace(4) %this1)
   ret void
 }
 
 define internal spir_func void @__dtor_g() {
 entry:
-  call spir_func void @_ZNU3AS416global_ctor_dtorD1Ev(%struct.global_ctor_dtor addrspace(4)* addrspacecast (%struct.global_ctor_dtor addrspace(1)* @g to %struct.global_ctor_dtor addrspace(4)*))
+  call spir_func void @_ZNU3AS416global_ctor_dtorD1Ev(ptr addrspace(4) addrspacecast (ptr addrspace(1) @g to ptr addrspace(4)))
   ret void
 }
 
@@ -60,26 +60,26 @@ entry:
   ret void
 }
 
-define linkonce_odr spir_func void @_ZNU3AS416global_ctor_dtorD2Ev(%struct.global_ctor_dtor addrspace(4)* %this) unnamed_addr align 2 {
+define linkonce_odr spir_func void @_ZNU3AS416global_ctor_dtorD2Ev(ptr addrspace(4) %this) unnamed_addr align 2 {
 entry:
-  %this.addr = alloca %struct.global_ctor_dtor addrspace(4)*, align 4
-  store %struct.global_ctor_dtor addrspace(4)* %this, %struct.global_ctor_dtor addrspace(4)** %this.addr, align 4
-  %this1 = load %struct.global_ctor_dtor addrspace(4)*, %struct.global_ctor_dtor addrspace(4)** %this.addr
-  %a = getelementptr inbounds %struct.global_ctor_dtor, %struct.global_ctor_dtor addrspace(4)* %this1, i32 0, i32 0
-  store i32 0, i32 addrspace(4)* %a, align 4
+  %this.addr = alloca ptr addrspace(4), align 4
+  store ptr addrspace(4) %this, ptr %this.addr, align 4
+  %this1 = load ptr addrspace(4), ptr %this.addr
+  %a = getelementptr inbounds %struct.global_ctor_dtor, ptr addrspace(4) %this1, i32 0, i32 0
+  store i32 0, ptr addrspace(4) %a, align 4
   ret void
 }
 
-define linkonce_odr spir_func void @_ZNU3AS416global_ctor_dtorC2Ei(%struct.global_ctor_dtor addrspace(4)* %this, i32 %i) unnamed_addr align 2 {
+define linkonce_odr spir_func void @_ZNU3AS416global_ctor_dtorC2Ei(ptr addrspace(4) %this, i32 %i) unnamed_addr align 2 {
 entry:
-  %this.addr = alloca %struct.global_ctor_dtor addrspace(4)*, align 4
+  %this.addr = alloca ptr addrspace(4), align 4
   %i.addr = alloca i32, align 4
-  store %struct.global_ctor_dtor addrspace(4)* %this, %struct.global_ctor_dtor addrspace(4)** %this.addr, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %this1 = load %struct.global_ctor_dtor addrspace(4)*, %struct.global_ctor_dtor addrspace(4)** %this.addr
-  %0 = load i32, i32* %i.addr, align 4
-  %a = getelementptr inbounds %struct.global_ctor_dtor, %struct.global_ctor_dtor addrspace(4)* %this1, i32 0, i32 0
-  store i32 %0, i32 addrspace(4)* %a, align 4
+  store ptr addrspace(4) %this, ptr %this.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %this1 = load ptr addrspace(4), ptr %this.addr
+  %0 = load i32, ptr %i.addr, align 4
+  %a = getelementptr inbounds %struct.global_ctor_dtor, ptr addrspace(4) %this1, i32 0, i32 0
+  store i32 %0, ptr addrspace(4) %a, align 4
   ret void
 }
 
@@ -107,11 +107,11 @@ entry:
 
 !spirv.ExecutionMode = !{!0, !1, !2, !3, !4, !5, !6, !7}
 
-!0 = !{void ()* @worker, i32 30, i32 262149}
-!1 = !{void ()* @worker, i32 18, i32 12, i32 10, i32 1}
-!2 = !{void ()* @worker, i32 17, i32 10, i32 10, i32 10}
-!3 = !{void ()* @worker, i32 36, i32 4}
-!4 = !{void ()* @_SPIRV_GLOBAL__I_45b04794_Test_attr.cl, i32 33}
-!5 = !{void ()* @_SPIRV_GLOBAL__I_45b04794_Test_attr.cl, i32 17, i32 1, i32 1, i32 1}
-!6 = !{void ()* @_SPIRV_GLOBAL__D_45b04794_Test_attr.cl, i32 34}
-!7 = !{void ()* @_SPIRV_GLOBAL__D_45b04794_Test_attr.cl, i32 17, i32 1, i32 1, i32 1}
+!0 = !{ptr @worker, i32 30, i32 262149}
+!1 = !{ptr @worker, i32 18, i32 12, i32 10, i32 1}
+!2 = !{ptr @worker, i32 17, i32 10, i32 10, i32 10}
+!3 = !{ptr @worker, i32 36, i32 4}
+!4 = !{ptr @_SPIRV_GLOBAL__I_45b04794_Test_attr.cl, i32 33}
+!5 = !{ptr @_SPIRV_GLOBAL__I_45b04794_Test_attr.cl, i32 17, i32 1, i32 1, i32 1}
+!6 = !{ptr @_SPIRV_GLOBAL__D_45b04794_Test_attr.cl, i32 34}
+!7 = !{ptr @_SPIRV_GLOBAL__D_45b04794_Test_attr.cl, i32 17, i32 1, i32 1, i32 1}
diff --git a/llvm/test/CodeGen/SPIRV/FOrdGreaterThanEqual_int.ll b/llvm/test/CodeGen/SPIRV/FOrdGreaterThanEqual_int.ll
index 79ca18bb70a20..2baa06f0ab3b5 100644
--- a/llvm/test/CodeGen/SPIRV/FOrdGreaterThanEqual_int.ll
+++ b/llvm/test/CodeGen/SPIRV/FOrdGreaterThanEqual_int.ll
@@ -5,10 +5,10 @@
 
 ;; LLVM IR was generated with -cl-std=c++ option
 
-define spir_kernel void @test(float %op1, float %op2, i32 addrspace(1)* %out) {
+define spir_kernel void @test(float %op1, float %op2, ptr addrspace(1) %out) {
 entry:
   %call = call spir_func i32 @_Z14isgreaterequalff(float %op1, float %op2)
-  store i32 %call, i32 addrspace(1)* %out
+  store i32 %call, ptr addrspace(1) %out
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/SpecConstants/bool-spirv-specconstant.ll b/llvm/test/CodeGen/SPIRV/SpecConstants/bool-spirv-specconstant.ll
index 125cc6137e78b..f5a5cafaaefc2 100644
--- a/llvm/test/CodeGen/SPIRV/SpecConstants/bool-spirv-specconstant.ll
+++ b/llvm/test/CodeGen/SPIRV/SpecConstants/bool-spirv-specconstant.ll
@@ -13,19 +13,19 @@
 
 $"_ZTSZZ4mainENK3$_0clERN2cl4sycl7handlerEE7Kernel1" = comdat any
 
-define weak_odr dso_local spir_kernel void @"_ZTSZZ4mainENK3$_0clERN2cl4sycl7handlerEE7Kernel1"(i8 addrspace(1)* %_arg_, %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range"* byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_1, %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range"* byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_2, %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id"* byval(%"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id") align 8 %_arg_3) local_unnamed_addr comdat {
+define weak_odr dso_local spir_kernel void @"_ZTSZZ4mainENK3$_0clERN2cl4sycl7handlerEE7Kernel1"(ptr addrspace(1) %_arg_, ptr byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_1, ptr byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_2, ptr byval(%"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id") align 8 %_arg_3) local_unnamed_addr comdat {
 entry:
-  %0 = getelementptr inbounds %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id", %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id"* %_arg_3, i64 0, i32 0, i32 0, i64 0
-  %1 = addrspacecast i64* %0 to i64 addrspace(4)*
-  %2 = load i64, i64 addrspace(4)* %1, align 8
-  %add.ptr.i = getelementptr inbounds i8, i8 addrspace(1)* %_arg_, i64 %2
+  %0 = getelementptr inbounds %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id", ptr %_arg_3, i64 0, i32 0, i32 0, i64 0
+  %1 = addrspacecast ptr %0 to ptr addrspace(4)
+  %2 = load i64, ptr addrspace(4) %1, align 8
+  %add.ptr.i = getelementptr inbounds i8, ptr addrspace(1) %_arg_, i64 %2
   %3 = call i1 @_Z20__spirv_SpecConstantia(i32 0, i8 1)
-  %ptridx.ascast.i.i = addrspacecast i8 addrspace(1)* %add.ptr.i to i8 addrspace(4)*
+  %ptridx.ascast.i.i = addrspacecast ptr addrspace(1) %add.ptr.i to ptr addrspace(4)
   %selected = select i1 %3, i8 0, i8 1
   %frombool.i = zext i1 %3 to i8
   %sum = add i8 %frombool.i, %selected
-  store volatile i8 %sum, i8 addrspace(4)* %ptridx.ascast.i.i, align 1
-  store i8 %selected, i8 addrspace(4)* %ptridx.ascast.i.i, align 1
+  store volatile i8 %sum, ptr addrspace(4) %ptridx.ascast.i.i, align 1
+  store i8 %selected, ptr addrspace(4) %ptridx.ascast.i.i, align 1
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/TruncToBool.ll b/llvm/test/CodeGen/SPIRV/TruncToBool.ll
index 8e23f4eb06914..5d45f48e2603a 100644
--- a/llvm/test/CodeGen/SPIRV/TruncToBool.ll
+++ b/llvm/test/CodeGen/SPIRV/TruncToBool.ll
@@ -3,11 +3,11 @@
 ; CHECK-SPIRV:      OpBitwiseAnd
 ; CHECK-SPIRV-NEXT: OpINotEqual
 
-define spir_kernel void @test(i32 %op1, i32 %op2, i8 %op3, i32 addrspace(1)* %out) {
+define spir_kernel void @test(i32 %op1, i32 %op2, i8 %op3, ptr addrspace(1) %out) {
 entry:
   %0 = trunc i8 %op3 to i1
   %call = call spir_func i32 @_Z14__spirv_Selectbii(i1 zeroext %0, i32 %op1, i32 %op2)
-  store i32 %call, i32 addrspace(1)* %out
+  store i32 %call, ptr addrspace(1) %out
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/atomicrmw.ll b/llvm/test/CodeGen/SPIRV/atomicrmw.ll
index a6cfe56cd06c3..5873baa9c412b 100644
--- a/llvm/test/CodeGen/SPIRV/atomicrmw.ll
+++ b/llvm/test/CodeGen/SPIRV/atomicrmw.ll
@@ -23,37 +23,37 @@
 
 define dso_local spir_func void @test_atomicrmw() local_unnamed_addr {
 entry:
-  %0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 acq_rel
+  %0 = atomicrmw xchg ptr addrspace(1) @ui, i32 42 acq_rel
 ; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_AcquireRelease]] %[[#Value]]
 
-  %1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 seq_cst
+  %1 = atomicrmw xchg ptr addrspace(1) @f, float 42.000000e+00 seq_cst
 ; CHECK: %[[#]] = OpAtomicExchange %[[#Float]] %[[#FPPointer]] %[[#Scope_CrossDevice]] %[[#MemSem_SequentiallyConsistent]] %[[#FPValue]]
 
-  %2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 monotonic
+  %2 = atomicrmw add ptr addrspace(1) @ui, i32 42 monotonic
 ; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %{{.+}} %[[#Value]]
 
-  %3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 acquire
+  %3 = atomicrmw sub ptr addrspace(1) @ui, i32 42 acquire
 ; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_Acquire]] %[[#Value]]
 
-  %4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 release
+  %4 = atomicrmw or ptr addrspace(1) @ui, i32 42 release
 ; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_Release]] %[[#Value]]
 
-  %5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 acq_rel
+  %5 = atomicrmw xor ptr addrspace(1) @ui, i32 42 acq_rel
 ; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_AcquireRelease]] %[[#Value]]
 
-  %6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 seq_cst
+  %6 = atomicrmw and ptr addrspace(1) @ui, i32 42 seq_cst
 ; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_SequentiallyConsistent]] %[[#Value]]
 
-  %7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 monotonic
+  %7 = atomicrmw max ptr addrspace(1) @ui, i32 42 monotonic
 ; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %{{.*}} %[[#Value]]
 
-  %8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 acquire
+  %8 = atomicrmw min ptr addrspace(1) @ui, i32 42 acquire
 ; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_Acquire]] %[[#Value]]
 
-  %9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 release
+  %9 = atomicrmw umax ptr addrspace(1) @ui, i32 42 release
 ; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_Release]] %[[#Value]]
 
-  %10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 acq_rel
+  %10 = atomicrmw umin ptr addrspace(1) @ui, i32 42 acq_rel
 ; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer]] %[[#Scope_CrossDevice]] %[[#MemSem_AcquireRelease]] %[[#Value]]
 
   ret void
diff --git a/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll b/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll
index f3c8f9967211a..5eba16c964f16 100644
--- a/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll
+++ b/llvm/test/CodeGen/SPIRV/basic_int_types_spirvdis.ll
@@ -6,51 +6,51 @@ define void @main() {
 entry:
 ; CHECK: %int16_t_Val = OpVariable %_ptr_Function_ushort Function
   %int16_t_Val = alloca i16, align 2
-  store i16 0, i16* %int16_t_Val, align 2
+  store i16 0, ptr %int16_t_Val, align 2
 
 ; CHECK: %int_Val = OpVariable %_ptr_Function_uint Function
   %int_Val = alloca i32, align 4
-  store i32 0, i32* %int_Val, align 4
+  store i32 0, ptr %int_Val, align 4
 
 ; CHECK: %int64_t_Val = OpVariable %_ptr_Function_ulong Function
   %int64_t_Val = alloca i64, align 8
-  store i64 0, i64* %int64_t_Val, align 8
+  store i64 0, ptr %int64_t_Val, align 8
 
 ; CHECK: %int16_t2_Val = OpVariable %_ptr_Function_v2ushort Function
   %int16_t2_Val = alloca <2 x i16>, align 4
-  store <2 x i16> zeroinitializer, <2 x i16>* %int16_t2_Val, align 4
+  store <2 x i16> zeroinitializer, ptr %int16_t2_Val, align 4
 
 ; CHECK: %int16_t3_Val = OpVariable %_ptr_Function_v3ushort Function
   %int16_t3_Val = alloca <3 x i16>, align 8
-  store <3 x i16> zeroinitializer, <3 x i16>* %int16_t3_Val, align 8
+  store <3 x i16> zeroinitializer, ptr %int16_t3_Val, align 8
 
 ; CHECK: %int16_t4_Val = OpVariable %_ptr_Function_v4ushort Function
   %int16_t4_Val = alloca <4 x i16>, align 8
-  store <4 x i16> zeroinitializer, <4 x i16>* %int16_t4_Val, align 8
+  store <4 x i16> zeroinitializer, ptr %int16_t4_Val, align 8
 
 ; CHECK: %int2_Val = OpVariable %_ptr_Function_v2uint Function
   %int2_Val = alloca <2 x i32>, align 8
-  store <2 x i32> zeroinitializer, <2 x i32>* %int2_Val, align 8
+  store <2 x i32> zeroinitializer, ptr %int2_Val, align 8
 
 ; CHECK: %int3_Val = OpVariable %_ptr_Function_v3uint Function
   %int3_Val = alloca <3 x i32>, align 16
-  store <3 x i32> zeroinitializer, <3 x i32>* %int3_Val, align 16
+  store <3 x i32> zeroinitializer, ptr %int3_Val, align 16
 
 ; CHECK: %int4_Val = OpVariable %_ptr_Function_v4uint Function
   %int4_Val = alloca <4 x i32>, align 16
-  store <4 x i32> zeroinitializer, <4 x i32>* %int4_Val, align 16
+  store <4 x i32> zeroinitializer, ptr %int4_Val, align 16
 
 ; CHECK: %int64_t2_Val = OpVariable %_ptr_Function_v2ulong Function
   %int64_t2_Val = alloca <2 x i64>, align 16
-  store <2 x i64> zeroinitializer, <2 x i64>* %int64_t2_Val, align 16
+  store <2 x i64> zeroinitializer, ptr %int64_t2_Val, align 16
 
 ; CHECK: %int64_t3_Val = OpVariable %_ptr_Function_v3ulong Function
   %int64_t3_Val = alloca <3 x i64>, align 32
-  store <3 x i64> zeroinitializer, <3 x i64>* %int64_t3_Val, align 32
+  store <3 x i64> zeroinitializer, ptr %int64_t3_Val, align 32
 
 ; CHECK: %int64_t4_Val = OpVariable %_ptr_Function_v4ulong Function
   %int64_t4_Val = alloca <4 x i64>, align 32
-  store <4 x i64> zeroinitializer, <4 x i64>* %int64_t4_Val, align 32
+  store <4 x i64> zeroinitializer, ptr %int64_t4_Val, align 32
 
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/branching/OpSwitch32.ll b/llvm/test/CodeGen/SPIRV/branching/OpSwitch32.ll
index fdf4e4ddbefb7..2f49317dd913e 100644
--- a/llvm/test/CodeGen/SPIRV/branching/OpSwitch32.ll
+++ b/llvm/test/CodeGen/SPIRV/branching/OpSwitch32.ll
@@ -18,34 +18,34 @@
 
 ; CHECK-SPIRV: OpSwitch %[[#]] %[[#]] 0 %[[#]] 1 %[[#]]
 
-define spir_kernel void @test_32(i32 addrspace(1)* %res) {
+define spir_kernel void @test_32(ptr addrspace(1) %res) {
 entry:
-  %res.addr = alloca i32 addrspace(1)*, align 8
+  %res.addr = alloca ptr addrspace(1), align 8
   %tid = alloca i32, align 4
-  store i32 addrspace(1)* %res, i32 addrspace(1)** %res.addr, align 8
+  store ptr addrspace(1) %res, ptr %res.addr, align 8
   %call = call spir_func i64 @_Z13get_global_idj(i32 0)
   %conv = trunc i64 %call to i32
-  store i32 %conv, i32* %tid, align 4
-  %0 = load i32, i32* %tid, align 4
+  store i32 %conv, ptr %tid, align 4
+  %0 = load i32, ptr %tid, align 4
   switch i32 %0, label %sw.epilog [
     i32 0, label %sw.bb
     i32 1, label %sw.bb1
   ]
 
 sw.bb:                                            ; preds = %entry
-  %1 = load i32, i32* %tid, align 4
+  %1 = load i32, ptr %tid, align 4
   %idxprom = sext i32 %1 to i64
-  %2 = load i32 addrspace(1)*, i32 addrspace(1)** %res.addr, align 8
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %2, i64 %idxprom
-  store i32 1, i32 addrspace(1)* %arrayidx, align 4
+  %2 = load ptr addrspace(1), ptr %res.addr, align 8
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %2, i64 %idxprom
+  store i32 1, ptr addrspace(1) %arrayidx, align 4
   br label %sw.epilog
 
 sw.bb1:                                           ; preds = %entry
-  %3 = load i32, i32* %tid, align 4
+  %3 = load i32, ptr %tid, align 4
   %idxprom2 = sext i32 %3 to i64
-  %4 = load i32 addrspace(1)*, i32 addrspace(1)** %res.addr, align 8
-  %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %4, i64 %idxprom2
-  store i32 2, i32 addrspace(1)* %arrayidx3, align 4
+  %4 = load ptr addrspace(1), ptr %res.addr, align 8
+  %arrayidx3 = getelementptr inbounds i32, ptr addrspace(1) %4, i64 %idxprom2
+  store i32 2, ptr addrspace(1) %arrayidx3, align 4
   br label %sw.epilog
 
 sw.epilog:                                        ; preds = %entry, %sw.bb1, %sw.bb
diff --git a/llvm/test/CodeGen/SPIRV/branching/OpSwitch64.ll b/llvm/test/CodeGen/SPIRV/branching/OpSwitch64.ll
index 8fc986fe41b44..f96d6ba20b662 100644
--- a/llvm/test/CodeGen/SPIRV/branching/OpSwitch64.ll
+++ b/llvm/test/CodeGen/SPIRV/branching/OpSwitch64.ll
@@ -25,14 +25,14 @@
 
 ; CHECK-SPIRV: OpSwitch %[[#]] %[[#]] 0 %[[#]] 1 %[[#]] 21474836481 %[[#]]
 
-define spir_kernel void @test_64(i32 addrspace(1)* %res) {
+define spir_kernel void @test_64(ptr addrspace(1) %res) {
 entry:
-  %res.addr = alloca i32 addrspace(1)*, align 8
+  %res.addr = alloca ptr addrspace(1), align 8
   %tid = alloca i64, align 8
-  store i32 addrspace(1)* %res, i32 addrspace(1)** %res.addr, align 8
+  store ptr addrspace(1) %res, ptr %res.addr, align 8
   %call = call spir_func i64 @_Z13get_global_idj(i32 0)
-  store i64 %call, i64* %tid, align 8
-  %0 = load i64, i64* %tid, align 8
+  store i64 %call, ptr %tid, align 8
+  %0 = load i64, ptr %tid, align 8
   switch i64 %0, label %sw.epilog [
     i64 0, label %sw.bb
     i64 1, label %sw.bb1
@@ -40,24 +40,24 @@ entry:
   ]
 
 sw.bb:                                            ; preds = %entry
-  %1 = load i64, i64* %tid, align 8
-  %2 = load i32 addrspace(1)*, i32 addrspace(1)** %res.addr, align 8
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %2, i64 %1
-  store i32 1, i32 addrspace(1)* %arrayidx, align 4
+  %1 = load i64, ptr %tid, align 8
+  %2 = load ptr addrspace(1), ptr %res.addr, align 8
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %2, i64 %1
+  store i32 1, ptr addrspace(1) %arrayidx, align 4
   br label %sw.epilog
 
 sw.bb1:                                           ; preds = %entry
-  %3 = load i64, i64* %tid, align 8
-  %4 = load i32 addrspace(1)*, i32 addrspace(1)** %res.addr, align 8
-  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %4, i64 %3
-  store i32 2, i32 addrspace(1)* %arrayidx2, align 4
+  %3 = load i64, ptr %tid, align 8
+  %4 = load ptr addrspace(1), ptr %res.addr, align 8
+  %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %4, i64 %3
+  store i32 2, ptr addrspace(1) %arrayidx2, align 4
   br label %sw.epilog
 
 sw.bb3:                                           ; preds = %entry
-  %5 = load i64, i64* %tid, align 8
-  %6 = load i32 addrspace(1)*, i32 addrspace(1)** %res.addr, align 8
-  %arrayidx4 = getelementptr inbounds i32, i32 addrspace(1)* %6, i64 %5
-  store i32 3, i32 addrspace(1)* %arrayidx4, align 4
+  %5 = load i64, ptr %tid, align 8
+  %6 = load ptr addrspace(1), ptr %res.addr, align 8
+  %arrayidx4 = getelementptr inbounds i32, ptr addrspace(1) %6, i64 %5
+  store i32 3, ptr addrspace(1) %arrayidx4, align 4
   br label %sw.epilog
 
 sw.epilog:                                        ; preds = %entry, %sw.bb3, %sw.bb1, %sw.bb
diff --git a/llvm/test/CodeGen/SPIRV/branching/OpSwitchChar.ll b/llvm/test/CodeGen/SPIRV/branching/OpSwitchChar.ll
index 2e1b01ef703fb..fc488dd72b18c 100644
--- a/llvm/test/CodeGen/SPIRV/branching/OpSwitchChar.ll
+++ b/llvm/test/CodeGen/SPIRV/branching/OpSwitchChar.ll
@@ -18,13 +18,13 @@
 
 ; CHECK-SPIRV: OpSwitch %[[#]] %[[#]] 0 %[[#]] 1 %[[#]] 2 %[[#]]
 
-define spir_kernel void @test_switch(i32 addrspace(1)* %res, i8 zeroext %val) {
+define spir_kernel void @test_switch(ptr addrspace(1) %res, i8 zeroext %val) {
 entry:
-  %res.addr = alloca i32 addrspace(1)*, align 4
+  %res.addr = alloca ptr addrspace(1), align 4
   %val.addr = alloca i8, align 1
-  store i32 addrspace(1)* %res, i32 addrspace(1)** %res.addr, align 4
-  store i8 %val, i8* %val.addr, align 1
-  %0 = load i8, i8* %val.addr, align 1
+  store ptr addrspace(1) %res, ptr %res.addr, align 4
+  store i8 %val, ptr %val.addr, align 1
+  %0 = load i8, ptr %val.addr, align 1
   switch i8 %0, label %sw.epilog [
     i8 0, label %sw.bb
     i8 1, label %sw.bb1
@@ -32,18 +32,18 @@ entry:
   ]
 
 sw.bb:                                            ; preds = %entry
-  %1 = load i32 addrspace(1)*, i32 addrspace(1)** %res.addr, align 4
-  store i32 1, i32 addrspace(1)* %1, align 4
+  %1 = load ptr addrspace(1), ptr %res.addr, align 4
+  store i32 1, ptr addrspace(1) %1, align 4
   br label %sw.epilog
 
 sw.bb1:                                           ; preds = %entry
-  %2 = load i32 addrspace(1)*, i32 addrspace(1)** %res.addr, align 4
-  store i32 2, i32 addrspace(1)* %2, align 4
+  %2 = load ptr addrspace(1), ptr %res.addr, align 4
+  store i32 2, ptr addrspace(1) %2, align 4
   br label %sw.epilog
 
 sw.bb2:                                           ; preds = %entry
-  %3 = load i32 addrspace(1)*, i32 addrspace(1)** %res.addr, align 4
-  store i32 3, i32 addrspace(1)* %3, align 4
+  %3 = load ptr addrspace(1), ptr %res.addr, align 4
+  store i32 3, ptr addrspace(1) %3, align 4
   br label %sw.epilog
 
 sw.epilog:                                        ; preds = %entry, %sw.bb2, %sw.bb1, %sw.bb
diff --git a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll
index bca90f4ebd151..c348c934e85ff 100644
--- a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll
+++ b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_32.ll
@@ -66,87 +66,87 @@ entry:
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[NumWorkgroups]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0
   %spv.num.workgroups = call i32 @llvm.spv.num.workgroups.i32(i32 0)
-  store i32 %spv.num.workgroups, i32* @G_spv_num_workgroups_0
+  store i32 %spv.num.workgroups, ptr @G_spv_num_workgroups_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[NumWorkgroups]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1
   %spv.num.workgroups1 = call i32 @llvm.spv.num.workgroups.i32(i32 1)
-  store i32 %spv.num.workgroups1, i32* @G_spv_num_workgroups_1
+  store i32 %spv.num.workgroups1, ptr @G_spv_num_workgroups_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[NumWorkgroups]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2
   %spv.num.workgroups2 = call i32 @llvm.spv.num.workgroups.i32(i32 2)
-  store i32 %spv.num.workgroups2, i32* @G_spv_num_workgroups_2
+  store i32 %spv.num.workgroups2, ptr @G_spv_num_workgroups_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupSize]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0
   %spv.workgroup.size = call i32 @llvm.spv.workgroup.size.i32(i32 0)
-  store i32 %spv.workgroup.size, i32* @G_spv_workgroup_size_0
+  store i32 %spv.workgroup.size, ptr @G_spv_workgroup_size_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupSize]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1
   %spv.workgroup.size3 = call i32 @llvm.spv.workgroup.size.i32(i32 1)
-  store i32 %spv.workgroup.size3, i32* @G_spv_workgroup_size_1
+  store i32 %spv.workgroup.size3, ptr @G_spv_workgroup_size_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupSize]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2
   %spv.workgroup.size4 = call i32 @llvm.spv.workgroup.size.i32(i32 2)
-  store i32 %spv.workgroup.size4, i32* @G_spv_workgroup_size_2
+  store i32 %spv.workgroup.size4, ptr @G_spv_workgroup_size_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupId]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0
   %spv.group.id = call i32 @llvm.spv.group.id.i32(i32 0)
-  store i32 %spv.group.id, i32* @G_spv_group_id_0
+  store i32 %spv.group.id, ptr @G_spv_group_id_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupId]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1
   %spv.group.id5 = call i32 @llvm.spv.group.id.i32(i32 1)
-  store i32 %spv.group.id5, i32* @G_spv_group_id_1
+  store i32 %spv.group.id5, ptr @G_spv_group_id_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[WorkgroupId]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2
   %spv.group.id6 = call i32 @llvm.spv.group.id.i32(i32 2)
-  store i32 %spv.group.id6, i32* @G_spv_group_id_2
+  store i32 %spv.group.id6, ptr @G_spv_group_id_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[LocalInvocationId]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0
   %spv.thread.id.in.group = call i32 @llvm.spv.thread.id.in.group.i32(i32 0)
-  store i32 %spv.thread.id.in.group, i32* @G_spv_thread_id_in_group_0
+  store i32 %spv.thread.id.in.group, ptr @G_spv_thread_id_in_group_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[LocalInvocationId]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1
   %spv.thread.id.in.group7 = call i32 @llvm.spv.thread.id.in.group.i32(i32 1)
-  store i32 %spv.thread.id.in.group7, i32* @G_spv_thread_id_in_group_1
+  store i32 %spv.thread.id.in.group7, ptr @G_spv_thread_id_in_group_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[LocalInvocationId]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2
   %spv.thread.id.in.group8 = call i32 @llvm.spv.thread.id.in.group.i32(i32 2)
-  store i32 %spv.thread.id.in.group8, i32* @G_spv_thread_id_in_group_2
+  store i32 %spv.thread.id.in.group8, ptr @G_spv_thread_id_in_group_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalInvocationId]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0
   %spv.thread.id = call i32 @llvm.spv.thread.id.i32(i32 0)
-  store i32 %spv.thread.id, i32* @G_spv_thread_id_0
+  store i32 %spv.thread.id, ptr @G_spv_thread_id_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalInvocationId]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1
   %spv.thread.id9 = call i32 @llvm.spv.thread.id.i32(i32 1)
-  store i32 %spv.thread.id9, i32* @G_spv_thread_id_1
+  store i32 %spv.thread.id9, ptr @G_spv_thread_id_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalInvocationId]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2
   %spv.thread.id10 = call i32 @llvm.spv.thread.id.i32(i32 2)
-  store i32 %spv.thread.id10, i32* @G_spv_thread_id_2
+  store i32 %spv.thread.id10, ptr @G_spv_thread_id_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalSize]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0
   %spv.num.workgroups11 = call i32 @llvm.spv.global.size.i32(i32 0)
-  store i32 %spv.num.workgroups11, i32* @G_spv_global_size_0
+  store i32 %spv.num.workgroups11, ptr @G_spv_global_size_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalSize]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1
   %spv.num.workgroups12 = call i32 @llvm.spv.global.size.i32(i32 1)
-  store i32 %spv.num.workgroups12, i32* @G_spv_global_size_1
+  store i32 %spv.num.workgroups12, ptr @G_spv_global_size_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalSize]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2
   %spv.num.workgroups13 = call i32 @llvm.spv.global.size.i32(i32 2)
-  store i32 %spv.num.workgroups13, i32* @G_spv_global_size_2
+  store i32 %spv.num.workgroups13, ptr @G_spv_global_size_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalOffset]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 0
   %spv.global.offset = call i32 @llvm.spv.global.offset.i32(i32 0)
-  store i32 %spv.global.offset, i32* @G_spv_global_offset_0
+  store i32 %spv.global.offset, ptr @G_spv_global_offset_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalOffset]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 1
   %spv.global.offset14 = call i32 @llvm.spv.global.offset.i32(i32 1)
-  store i32 %spv.global.offset14, i32* @G_spv_global_offset_1
+  store i32 %spv.global.offset14, ptr @G_spv_global_offset_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I32V3]] [[GlobalOffset]]
 ; CHECK: OpCompositeExtract [[I32]] [[LD]] 2
   %spv.global.offset15 = call i32 @llvm.spv.global.offset.i32(i32 2)
-  store i32 %spv.global.offset15, i32* @G_spv_global_offset_2
+  store i32 %spv.global.offset15, ptr @G_spv_global_offset_2
 ; CHECK: OpLoad %5 [[SubgroupSize]]
   %0 = call i32 @llvm.spv.subgroup.size()
   store i32 %0, ptr %ssize, align 4
diff --git a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll
index 26c2d866d14c7..45227c31c6ad8 100644
--- a/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll
+++ b/llvm/test/CodeGen/SPIRV/builtin_intrinsics_64.ll
@@ -67,87 +67,87 @@ entry:
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[NumWorkgroups]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0
   %spv.num.workgroups = call i64 @llvm.spv.num.workgroups.i64(i32 0)
-  store i64 %spv.num.workgroups, i64* @G_spv_num_workgroups_0
+  store i64 %spv.num.workgroups, ptr @G_spv_num_workgroups_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[NumWorkgroups]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1
   %spv.num.workgroups1 = call i64 @llvm.spv.num.workgroups.i64(i32 1)
-  store i64 %spv.num.workgroups1, i64* @G_spv_num_workgroups_1
+  store i64 %spv.num.workgroups1, ptr @G_spv_num_workgroups_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[NumWorkgroups]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2
   %spv.num.workgroups2 = call i64 @llvm.spv.num.workgroups.i64(i32 2)
-  store i64 %spv.num.workgroups2, i64* @G_spv_num_workgroups_2
+  store i64 %spv.num.workgroups2, ptr @G_spv_num_workgroups_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupSize]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0
   %spv.workgroup.size = call i64 @llvm.spv.workgroup.size.i64(i32 0)
-  store i64 %spv.workgroup.size, i64* @G_spv_workgroup_size_0
+  store i64 %spv.workgroup.size, ptr @G_spv_workgroup_size_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupSize]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1
   %spv.workgroup.size3 = call i64 @llvm.spv.workgroup.size.i64(i32 1)
-  store i64 %spv.workgroup.size3, i64* @G_spv_workgroup_size_1
+  store i64 %spv.workgroup.size3, ptr @G_spv_workgroup_size_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupSize]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2
   %spv.workgroup.size4 = call i64 @llvm.spv.workgroup.size.i64(i32 2)
-  store i64 %spv.workgroup.size4, i64* @G_spv_workgroup_size_2
+  store i64 %spv.workgroup.size4, ptr @G_spv_workgroup_size_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupId]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0
   %spv.group.id = call i64 @llvm.spv.group.id.i64(i32 0)
-  store i64 %spv.group.id, i64* @G_spv_group_id_0
+  store i64 %spv.group.id, ptr @G_spv_group_id_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupId]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1
   %spv.group.id5 = call i64 @llvm.spv.group.id.i64(i32 1)
-  store i64 %spv.group.id5, i64* @G_spv_group_id_1
+  store i64 %spv.group.id5, ptr @G_spv_group_id_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[WorkgroupId]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2
   %spv.group.id6 = call i64 @llvm.spv.group.id.i64(i32 2)
-  store i64 %spv.group.id6, i64* @G_spv_group_id_2
+  store i64 %spv.group.id6, ptr @G_spv_group_id_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[LocalInvocationId]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0
   %spv.thread.id.in.group = call i64 @llvm.spv.thread.id.in.group.i64(i32 0)
-  store i64 %spv.thread.id.in.group, i64* @G_spv_thread_id_in_group_0
+  store i64 %spv.thread.id.in.group, ptr @G_spv_thread_id_in_group_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[LocalInvocationId]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1
   %spv.thread.id.in.group7 = call i64 @llvm.spv.thread.id.in.group.i64(i32 1)
-  store i64 %spv.thread.id.in.group7, i64* @G_spv_thread_id_in_group_1
+  store i64 %spv.thread.id.in.group7, ptr @G_spv_thread_id_in_group_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[LocalInvocationId]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2
   %spv.thread.id.in.group8 = call i64 @llvm.spv.thread.id.in.group.i64(i32 2)
-  store i64 %spv.thread.id.in.group8, i64* @G_spv_thread_id_in_group_2
+  store i64 %spv.thread.id.in.group8, ptr @G_spv_thread_id_in_group_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalInvocationId]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0
   %spv.thread.id = call i64 @llvm.spv.thread.id.i64(i32 0)
-  store i64 %spv.thread.id, i64* @G_spv_thread_id_0
+  store i64 %spv.thread.id, ptr @G_spv_thread_id_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalInvocationId]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1
   %spv.thread.id9 = call i64 @llvm.spv.thread.id.i64(i32 1)
-  store i64 %spv.thread.id9, i64* @G_spv_thread_id_1
+  store i64 %spv.thread.id9, ptr @G_spv_thread_id_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalInvocationId]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2
   %spv.thread.id10 = call i64 @llvm.spv.thread.id.i64(i32 2)
-  store i64 %spv.thread.id10, i64* @G_spv_thread_id_2
+  store i64 %spv.thread.id10, ptr @G_spv_thread_id_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalSize]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0
   %spv.num.workgroups11 = call i64 @llvm.spv.global.size.i64(i32 0)
-  store i64 %spv.num.workgroups11, i64* @G_spv_global_size_0
+  store i64 %spv.num.workgroups11, ptr @G_spv_global_size_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalSize]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1
   %spv.num.workgroups12 = call i64 @llvm.spv.global.size.i64(i32 1)
-  store i64 %spv.num.workgroups12, i64* @G_spv_global_size_1
+  store i64 %spv.num.workgroups12, ptr @G_spv_global_size_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalSize]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2
   %spv.num.workgroups13 = call i64 @llvm.spv.global.size.i64(i32 2)
-  store i64 %spv.num.workgroups13, i64* @G_spv_global_size_2
+  store i64 %spv.num.workgroups13, ptr @G_spv_global_size_2
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalOffset]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 0
   %spv.global.offset = call i64 @llvm.spv.global.offset.i64(i32 0)
-  store i64 %spv.global.offset, i64* @G_spv_global_offset_0
+  store i64 %spv.global.offset, ptr @G_spv_global_offset_0
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalOffset]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 1
   %spv.global.offset14 = call i64 @llvm.spv.global.offset.i64(i32 1)
-  store i64 %spv.global.offset14, i64* @G_spv_global_offset_1
+  store i64 %spv.global.offset14, ptr @G_spv_global_offset_1
 ; CHECK: [[LD:%[0-9]*]] = OpLoad [[I64V3]] [[GlobalOffset]]
 ; CHECK: OpCompositeExtract [[I64]] [[LD]] 2
   %spv.global.offset15 = call i64 @llvm.spv.global.offset.i64(i32 2)
-  store i64 %spv.global.offset15, i64* @G_spv_global_offset_2
+  store i64 %spv.global.offset15, ptr @G_spv_global_offset_2
 ; CHECK: OpLoad %5 [[SubgroupSize]]
   %0 = call i32 @llvm.spv.subgroup.size()
   store i32 %0, ptr %ssize, align 4
diff --git a/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll b/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll
index 8dd9b387a6d84..60ad2fda1ce40 100644
--- a/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll
+++ b/llvm/test/CodeGen/SPIRV/builtin_vars-decorate.ll
@@ -94,23 +94,23 @@
 define spir_kernel void @_Z1wv() {
 entry:
   %r1 = tail call spir_func i64 @get_global_linear_id()
-  store i64 %r1, i64* @G_r1
+  store i64 %r1, ptr @G_r1
   %r2 = tail call spir_func i64 @get_local_linear_id()
-  store i64 %r2, i64* @G_r2
+  store i64 %r2, ptr @G_r2
   %r3 = tail call spir_func i32 @get_work_dim()
-  store i32 %r3, i32* @G_r3
+  store i32 %r3, ptr @G_r3
   %r4 = tail call spir_func i32 @get_sub_group_size()
-  store i32 %r4, i32* @G_r4
+  store i32 %r4, ptr @G_r4
   %r5 = tail call spir_func i32 @get_max_sub_group_size()
-  store i32 %r5, i32* @G_r5
+  store i32 %r5, ptr @G_r5
   %r6 = tail call spir_func i32 @get_num_sub_groups()
-  store i32 %r6, i32* @G_r6
+  store i32 %r6, ptr @G_r6
   %r7 = tail call spir_func i32 @get_enqueued_num_sub_groups()
-  store i32 %r7, i32* @G_r7
+  store i32 %r7, ptr @G_r7
   %r8 = tail call spir_func i32 @get_sub_group_id()
-  store i32 %r8, i32* @G_r8
+  store i32 %r8, ptr @G_r8
   %r9 = tail call spir_func i32 @get_sub_group_local_id()
-  store i32 %r9, i32* @G_r9
+  store i32 %r9, ptr @G_r9
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/capability-Int64Atomics-store.ll b/llvm/test/CodeGen/SPIRV/capability-Int64Atomics-store.ll
index ee626db8d9672..cc731b6ce57f3 100644
--- a/llvm/test/CodeGen/SPIRV/capability-Int64Atomics-store.ll
+++ b/llvm/test/CodeGen/SPIRV/capability-Int64Atomics-store.ll
@@ -10,10 +10,10 @@
 
 ; CHECK: OpCapability Int64Atomics
 
-define spir_func void @foo(i64 addrspace(4)* %object, i64 %desired) {
+define spir_func void @foo(ptr addrspace(4) %object, i64 %desired) {
 entry:
-  tail call spir_func void @_Z12atomic_storePVU3AS4U7_Atomicll(i64 addrspace(4)* %object, i64 %desired)
+  tail call spir_func void @_Z12atomic_storePVU3AS4U7_Atomicll(ptr addrspace(4) %object, i64 %desired)
   ret void
 }
 
-declare spir_func void @_Z12atomic_storePVU3AS4U7_Atomicll(i64 addrspace(4)*, i64)
+declare spir_func void @_Z12atomic_storePVU3AS4U7_Atomicll(ptr addrspace(4), i64)
diff --git a/llvm/test/CodeGen/SPIRV/capability-Int64Atomics.ll b/llvm/test/CodeGen/SPIRV/capability-Int64Atomics.ll
index 65047f8c6e48f..5b22361b035ff 100644
--- a/llvm/test/CodeGen/SPIRV/capability-Int64Atomics.ll
+++ b/llvm/test/CodeGen/SPIRV/capability-Int64Atomics.ll
@@ -10,10 +10,10 @@
 
 ; CHECK: OpCapability Int64Atomics
 
-define spir_func void @foo(i64 addrspace(4)* %object, i64 %desired) {
+define spir_func void @foo(ptr addrspace(4) %object, i64 %desired) {
 entry:
-  %call = tail call spir_func i64 @_Z16atomic_fetch_xorPVU3AS4U7_Atomicll(i64 addrspace(4)* %object, i64 %desired)
+  %call = tail call spir_func i64 @_Z16atomic_fetch_xorPVU3AS4U7_Atomicll(ptr addrspace(4) %object, i64 %desired)
   ret void
 }
 
-declare spir_func i64 @_Z16atomic_fetch_xorPVU3AS4U7_Atomicll(i64 addrspace(4)*, i64)
+declare spir_func i64 @_Z16atomic_fetch_xorPVU3AS4U7_Atomicll(ptr addrspace(4), i64)
diff --git a/llvm/test/CodeGen/SPIRV/capability-kernel.ll b/llvm/test/CodeGen/SPIRV/capability-kernel.ll
index fea19511d4fdc..8240ded2db37a 100644
--- a/llvm/test/CodeGen/SPIRV/capability-kernel.ll
+++ b/llvm/test/CodeGen/SPIRV/capability-kernel.ll
@@ -4,7 +4,7 @@
 ; CHECK-DAG: OpCapability Addresses
 
 ; CHECK-DAG: OpCapability Linkage
-define spir_func void @func_export(i32 addrspace(1)* nocapture %a) {
+define spir_func void @func_export(ptr addrspace(1) nocapture %a) {
 entry:
 ; CHECK-DAG: OpCapability Int64
   %call = tail call spir_func i64 @_Z13get_global_idj(i32 0)
@@ -12,7 +12,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32 addrspace(1)* %a, align 4
+  store i32 1, ptr addrspace(1) %a, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
@@ -24,10 +24,10 @@ declare spir_func i64 @_Z13get_global_idj(i32)
 ; CHECK-DAG: OpCapability Kernel
 ; CHECK-NOT: OpCapability Shader
 ; CHECK-NOT: OpCapability Float64
-define spir_kernel void @func_kernel(i32 addrspace(1)* %a) {
+define spir_kernel void @func_kernel(ptr addrspace(1) %a) {
 entry:
-  tail call spir_func void @func_import(i32 addrspace(1)* %a)
+  tail call spir_func void @func_import(ptr addrspace(1) %a)
   ret void
 }
 
-declare spir_func void @func_import(i32 addrspace(1)*)
+declare spir_func void @func_import(ptr addrspace(1))
diff --git a/llvm/test/CodeGen/SPIRV/constant/global-constants.ll b/llvm/test/CodeGen/SPIRV/constant/global-constants.ll
index 43dbed8b044b5..c6bc6dcd5cb4f 100644
--- a/llvm/test/CodeGen/SPIRV/constant/global-constants.ll
+++ b/llvm/test/CodeGen/SPIRV/constant/global-constants.ll
@@ -6,17 +6,17 @@
 @local    = addrspace(3) constant i32 3 ; OpenCL local memory
 
 define i32 @getGlobal1() {
-  %g = load i32, i32 addrspace(1)* @global
+  %g = load i32, ptr addrspace(1) @global
   ret i32 %g
 }
 
 define i32 @getGlobal2() {
-  %g = load i32, i32 addrspace(2)* @constant
+  %g = load i32, ptr addrspace(2) @constant
   ret i32 %g
 }
 
 define i32 @getGlobal3() {
-  %g = load i32, i32 addrspace(3)* @local
+  %g = load i32, ptr addrspace(3) @local
   ret i32 %g
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/debug-info/opname-filtering.ll b/llvm/test/CodeGen/SPIRV/debug-info/opname-filtering.ll
index b709a78038a91..6e7ea5cc14cb1 100644
--- a/llvm/test/CodeGen/SPIRV/debug-info/opname-filtering.ll
+++ b/llvm/test/CodeGen/SPIRV/debug-info/opname-filtering.ll
@@ -45,6 +45,6 @@ entry:
 body:
   %add = add i32 %param, 1
   %sub = sub i32 %add, 1
-  store i32 %sub, i32* %localVar
+  store i32 %sub, ptr %localVar
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/empty.ll b/llvm/test/CodeGen/SPIRV/empty.ll
index 29af913a0daee..1f314d58e6420 100644
--- a/llvm/test/CodeGen/SPIRV/empty.ll
+++ b/llvm/test/CodeGen/SPIRV/empty.ll
@@ -6,9 +6,9 @@
 
 ; CHECK: OpCapability Addresses
 ; CHECK: "foo"
-define spir_kernel void @foo(i32 addrspace(1)* %a) {
+define spir_kernel void @foo(ptr addrspace(1) %a) {
 entry:
-  %a.addr = alloca i32 addrspace(1)*, align 4
-  store i32 addrspace(1)* %a, i32 addrspace(1)** %a.addr, align 4
+  %a.addr = alloca ptr addrspace(1), align 4
+  store ptr addrspace(1) %a, ptr %a.addr, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/event_no_group_cap.ll b/llvm/test/CodeGen/SPIRV/event_no_group_cap.ll
index af4ac7cf44500..e5da3163b53b4 100644
--- a/llvm/test/CodeGen/SPIRV/event_no_group_cap.ll
+++ b/llvm/test/CodeGen/SPIRV/event_no_group_cap.ll
@@ -10,12 +10,12 @@
 
 %opencl.event_t = type opaque
 
-define dso_local spir_kernel void @test_fn(i8 addrspace(1)* noundef %src) {
+define dso_local spir_kernel void @test_fn(ptr addrspace(1) noundef %src) {
 entry:
-  %src.addr = alloca i8 addrspace(1)*, align 8
-  store i8 addrspace(1)* %src, i8 addrspace(1)** %src.addr, align 8
-  call spir_func void @_Z17wait_group_eventsiPU3AS49ocl_event(i32 noundef 0, %opencl.event_t* addrspace(4)* noundef null)
+  %src.addr = alloca ptr addrspace(1), align 8
+  store ptr addrspace(1) %src, ptr %src.addr, align 8
+  call spir_func void @_Z17wait_group_eventsiPU3AS49ocl_event(i32 noundef 0, ptr addrspace(4) noundef null)
   ret void
 }
 
-declare spir_func void @_Z17wait_group_eventsiPU3AS49ocl_event(i32 noundef, %opencl.event_t* addrspace(4)* noundef)
+declare spir_func void @_Z17wait_group_eventsiPU3AS49ocl_event(i32 noundef, ptr addrspace(4) noundef)
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
index 1fcaca0ae36f2..e78bb94d992cf 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_shader_atomic_float_add/atomicrmw_faddfsub_float.ll
@@ -69,15 +69,15 @@ declare spir_func float @_Z25atomic_fetch_sub_explicitPU3AS1VU7_Atomicff12memory
 
 define dso_local spir_func void @test4(i64 noundef %arg, float %val) local_unnamed_addr {
 entry:
-  %ptr1 = inttoptr i64 %arg to float addrspace(1)*
+  %ptr1 = inttoptr i64 %arg to ptr addrspace(1)
   %v1 = atomicrmw fadd ptr addrspace(1) %ptr1, float %val seq_cst, align 4
-  %ptr2 = inttoptr i64 %arg to float addrspace(1)*
+  %ptr2 = inttoptr i64 %arg to ptr addrspace(1)
   %v2 = atomicrmw fsub ptr addrspace(1) %ptr2, float %val seq_cst, align 4
-  %ptr3 = inttoptr i64 %arg to float addrspace(1)*
+  %ptr3 = inttoptr i64 %arg to ptr addrspace(1)
   %v3 = tail call spir_func float @_Z21__spirv_AtomicFAddEXT(ptr addrspace(1) %ptr3, i32 1, i32 16, float %val)
-  %ptr4 = inttoptr i64 %arg to float addrspace(1)*
+  %ptr4 = inttoptr i64 %arg to ptr addrspace(1)
   %v4 = tail call spir_func float @_Z25atomic_fetch_add_explicitPU3AS1VU7_Atomicff12memory_order(ptr addrspace(1) %ptr4, float %val, i32 0)
-  %ptr5 = inttoptr i64 %arg to float addrspace(1)*
+  %ptr5 = inttoptr i64 %arg to ptr addrspace(1)
   %v5 = tail call spir_func float @_Z25atomic_fetch_sub_explicitPU3AS1VU7_Atomicff12memory_order(ptr addrspace(1) %ptr5, float %val, i32 0)
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_fp_max_error/IntelFPMaxErrorFPMath.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_fp_max_error/IntelFPMaxErrorFPMath.ll
index 34c3741cfc6ef..ca7566e1060c4 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_fp_max_error/IntelFPMaxErrorFPMath.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_fp_max_error/IntelFPMaxErrorFPMath.ll
@@ -22,12 +22,12 @@ ret float %f1
 ; CHECK: %[[#F3]] = OpFDiv %[[#FloatTy]]
 ; CHECK: %[[#Callee]] = OpFunctionCall %[[#FloatTy]] %[[#CalleeName]]
 
-define void @test_fp_max_error_decoration(float %f1, float %f2, float* %out) {
+define void @test_fp_max_error_decoration(float %f1, float %f2, ptr %out) {
 entry:
 %f3 = fdiv float %f1, %f2, !fpmath !0
-store volatile float %f3, float* %out
+store volatile float %f3, ptr %out
 %call = call float @callee(float %f1, float %f2), !fpmath !1
-store volatile float %call, float* %out
+store volatile float %call, ptr %out
 ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
index 7adb039464c4f..a114b40e5810c 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_bit_instructions.ll
@@ -7,10 +7,10 @@
 ; CHECK-EXTENSION: %[[#int:]] = OpTypeInt 32
 ; CHECK-EXTENSION: OpBitReverse %[[#int]]
 
-define spir_kernel void @testBitRev(i32 %a, i32 %b, i32 %c, i32 addrspace(1)* nocapture %res) local_unnamed_addr {
+define spir_kernel void @testBitRev(i32 %a, i32 %b, i32 %c, ptr addrspace(1) nocapture %res) local_unnamed_addr {
 entry:
   %call = tail call i32 @llvm.bitreverse.i32(i32 %b)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll
index c7326cd7b4cfa..d9a40ac266d35 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/decoration.ll
@@ -93,115 +93,115 @@ declare dso_local spir_func noundef nofpclass(nan inf) <2 x float> @_Z23__spirv_
 define weak_odr dso_local spir_kernel void @foo(float %1, float %2) {
 entry:
   %addRes = fadd float %1,  %2
-  store volatile float %addRes, float* @G_addRes
+  store volatile float %addRes, ptr @G_addRes
   ; CHECK: %[[#subRes]] = OpFSub
   %subRes = fsub nnan float %1,  %2
-  store volatile float %subRes, float* @G_subRes
+  store volatile float %subRes, ptr @G_subRes
   ; CHECK: %[[#mulRes]] = OpFMul
   %mulRes = fmul ninf float %1,  %2
-  store volatile float %mulRes, float* @G_mulRes
+  store volatile float %mulRes, ptr @G_mulRes
   ; CHECK: %[[#divRes]] = OpFDiv
   %divRes = fdiv nsz float %1,  %2
-  store volatile float %divRes, float* @G_divRes
+  store volatile float %divRes, ptr @G_divRes
   ; CHECK: %[[#remRes]] = OpFRem
   %remRes = frem arcp float %1,  %2
-  store volatile float %remRes, float* @G_remRes
+  store volatile float %remRes, ptr @G_remRes
   ; CHECK: %[[#negRes]] = OpFNegate
   %negRes = fneg fast float %1
-  store volatile float %negRes, float* @G_negRes
+  store volatile float %negRes, ptr @G_negRes
   ; CHECK: %[[#oeqRes]] = OpFOrdEqual
   %oeqRes = fcmp nnan ninf oeq float %1,  %2
-  store volatile i1 %oeqRes, i1* @G_oeqRes
+  store volatile i1 %oeqRes, ptr @G_oeqRes
   %oneRes = fcmp one float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %oneRes, i1* @G_oneRes
+  store volatile i1 %oneRes, ptr @G_oneRes
   ; CHECK: %[[#oltRes]] = OpFOrdLessThan
   %oltRes = fcmp nnan olt float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %oltRes, i1* @G_oltRes
+  store volatile i1 %oltRes, ptr @G_oltRes
   ; CHECK: %[[#ogtRes]] = OpFOrdGreaterThan
   %ogtRes = fcmp ninf ogt float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %ogtRes, i1* @G_ogtRes
+  store volatile i1 %ogtRes, ptr @G_ogtRes
   ; CHECK: %[[#oleRes]] = OpFOrdLessThanEqual
   %oleRes = fcmp nsz ole float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %oleRes, i1* @G_oleRes
+  store volatile i1 %oleRes, ptr @G_oleRes
   ; CHECK: %[[#ogeRes]] = OpFOrdGreaterThanEqual
   %ogeRes = fcmp arcp oge float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %ogeRes, i1* @G_ogeRes
+  store volatile i1 %ogeRes, ptr @G_ogeRes
   ; CHECK: %[[#ordRes]] = OpOrdered
   %ordRes = fcmp fast ord float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %ordRes, i1* @G_ordRes
+  store volatile i1 %ordRes, ptr @G_ordRes
   ; CHECK: %[[#ueqRes]] = OpFUnordEqual
   %ueqRes = fcmp nnan ninf ueq float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %ueqRes, i1* @G_ueqRes
+  store volatile i1 %ueqRes, ptr @G_ueqRes
   %uneRes = fcmp une float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %uneRes, i1* @G_uneRes
+  store volatile i1 %uneRes, ptr @G_uneRes
   %ultRes = fcmp ult float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %ultRes, i1* @G_ultRes
+  store volatile i1 %ultRes, ptr @G_ultRes
   %ugtRes = fcmp ugt float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %ugtRes, i1* @G_ugtRes
+  store volatile i1 %ugtRes, ptr @G_ugtRes
   %uleRes = fcmp ule float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %uleRes, i1* @G_uleRes
+  store volatile i1 %uleRes, ptr @G_uleRes
   %ugeRes = fcmp uge float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %ugeRes, i1* @G_ugeRes
+  store volatile i1 %ugeRes, ptr @G_ugeRes
   %unoRes = fcmp uno float %1,  %2, !spirv.Decorations !3
-  store volatile i1 %unoRes, i1* @G_unoRes
+  store volatile i1 %unoRes, ptr @G_unoRes
   %modRes = call spir_func float @_Z4fmodff(float %1, float %2)
-  store volatile float %modRes, float* @G_modRes
+  store volatile float %modRes, ptr @G_modRes
   ; CHECK: %[[#maxRes]] = OpExtInst %[[#]] %[[#]] fmax
   %maxRes = tail call fast spir_func noundef nofpclass(nan inf) float @_Z16__spirv_ocl_fmaxff(float noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2)
-  store volatile float %maxRes, float* @G_maxRes
+  store volatile float %maxRes, ptr @G_maxRes
    ; CHECK: %[[#maxCommonRes]] = OpExtInst %[[#]] %[[#]] fmax
    %maxCommonRes = tail call spir_func noundef float @_Z23__spirv_ocl_fmax_commonff(float noundef nofpclass(nan inf) %1, float noundef nofpclass(nan inf) %2)
-  store volatile float %maxCommonRes, float* @G_maxCommonRes
+  store volatile float %maxCommonRes, ptr @G_maxCommonRes
   ret void
 }
 
 define weak_odr dso_local spir_kernel void @fooV(<2 x float> %v1, <2 x float> %v2) {
   %addResV = fadd <2 x float> %v1,  %v2
-  store volatile <2 x float> %addResV, <2 x float>* @G_addResV
+  store volatile <2 x float> %addResV, ptr @G_addResV
   %subResV = fsub nnan <2 x float> %v1,  %v2
-  store volatile <2 x float> %subResV, <2 x float>* @G_subResV
+  store volatile <2 x float> %subResV, ptr @G_subResV
   %mulResV = fmul ninf <2 x float> %v1,  %v2
-  store volatile <2 x float> %mulResV, <2 x float>* @G_mulResV
+  store volatile <2 x float> %mulResV, ptr @G_mulResV
   %divResV = fdiv nsz <2 x float> %v1,  %v2
-  store volatile <2 x float> %divResV, <2 x float>* @G_divResV
+  store volatile <2 x float> %divResV, ptr @G_divResV
   %remResV = frem arcp <2 x float> %v1,  %v2
-  store volatile <2 x float> %remResV, <2 x float>* @G_remResV
+  store volatile <2 x float> %remResV, ptr @G_remResV
   %negResV = fneg fast <2 x float> %v1
-  store volatile <2 x float> %negResV, <2 x float>* @G_negResV
+  store volatile <2 x float> %negResV, ptr @G_negResV
   %oeqResV = fcmp nnan ninf oeq <2 x float> %v1,  %v2
-  store volatile <2 x i1> %oeqResV, <2 x i1>* @G_oeqResV
+  store volatile <2 x i1> %oeqResV, ptr @G_oeqResV
   %oneResV = fcmp one <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %oneResV, <2 x i1>* @G_oneResV
+  store volatile <2 x i1> %oneResV, ptr @G_oneResV
   %oltResV = fcmp nnan olt <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %oltResV, <2 x i1>* @G_oltResV
+  store volatile <2 x i1> %oltResV, ptr @G_oltResV
   %ogtResV = fcmp ninf ogt <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %ogtResV, <2 x i1>* @G_ogtResV
+  store volatile <2 x i1> %ogtResV, ptr @G_ogtResV
   %oleResV = fcmp nsz ole <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %oleResV, <2 x i1>* @G_oleResV
+  store volatile <2 x i1> %oleResV, ptr @G_oleResV
   %ogeResV = fcmp arcp oge <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %ogeResV, <2 x i1>* @G_ogeResV
+  store volatile <2 x i1> %ogeResV, ptr @G_ogeResV
   %ordResV = fcmp fast ord <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %ordResV, <2 x i1>* @G_ordResV
+  store volatile <2 x i1> %ordResV, ptr @G_ordResV
   %ueqResV = fcmp nnan ninf ueq <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %ueqResV, <2 x i1>* @G_ueqResV
+  store volatile <2 x i1> %ueqResV, ptr @G_ueqResV
   %uneResV = fcmp une <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %uneResV, <2 x i1>* @G_uneResV
+  store volatile <2 x i1> %uneResV, ptr @G_uneResV
   %ultResV = fcmp ult <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %ultResV, <2 x i1>* @G_ultResV
+  store volatile <2 x i1> %ultResV, ptr @G_ultResV
   %ugtResV = fcmp ugt <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %ugtResV, <2 x i1>* @G_ugtResV
+  store volatile <2 x i1> %ugtResV, ptr @G_ugtResV
   %uleResV = fcmp ule <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %uleResV, <2 x i1>* @G_uleResV
+  store volatile <2 x i1> %uleResV, ptr @G_uleResV
   %ugeResV = fcmp uge <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %ugeResV, <2 x i1>* @G_ugeResV
+  store volatile <2 x i1> %ugeResV, ptr @G_ugeResV
   %unoResV = fcmp uno <2 x float> %v1,  %v2, !spirv.Decorations !3
-  store volatile <2 x i1> %unoResV, <2 x i1>* @G_unoResV
+  store volatile <2 x i1> %unoResV, ptr @G_unoResV
   %modResV = call spir_func <2 x float> @_Z4fmodDv2_fDv2_f(<2 x float> %v1, <2 x float> %v2)
-  store volatile <2 x float> %modResV, <2 x float>* @G_modResV
+  store volatile <2 x float> %modResV, ptr @G_modResV
   %maxResV = tail call fast spir_func noundef nofpclass(nan inf) <2 x float> @_Z16__spirv_ocl_fmaxDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2)
-  store volatile <2 x float> %maxResV, <2 x float>* @G_maxResV
+  store volatile <2 x float> %maxResV, ptr @G_maxResV
    %maxCommonResV = tail call spir_func noundef <2 x float> @_Z23__spirv_ocl_fmax_commonDv2_fDv2_f(<2 x float> noundef nofpclass(nan inf) %v1, <2 x float> noundef nofpclass(nan inf) %v2)
-  store volatile <2 x float> %maxCommonResV, <2 x float>* @G_maxCommonResV
+  store volatile <2 x float> %maxCommonResV, ptr @G_maxCommonResV
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/disabled-on-amd.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/disabled-on-amd.ll
index 8619ee9881300..0de9e9588c55f 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/disabled-on-amd.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_float_controls2/disabled-on-amd.ll
@@ -14,10 +14,10 @@
 ; CHECK: SPV_KHR_float_controls2
 ; CHECK-AMD-NOT: SPV_KHR_float_controls2
 
-define spir_kernel void @foo(float %a, float %b, float addrspace(1)* %out) {
+define spir_kernel void @foo(float %a, float %b, ptr addrspace(1) %out) {
 entry:
   ; Use contract to trigger a use of SPV_KHR_float_controls2
   %r1 = fadd contract float %a, %b
-  store volatile float %r1, float addrspace(1)* %out
+  store volatile float %r1, ptr addrspace(1) %out
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/function/alloca-load-store.ll b/llvm/test/CodeGen/SPIRV/function/alloca-load-store.ll
index 55ab715feff3d..8ff94dbb8c6e5 100644
--- a/llvm/test/CodeGen/SPIRV/function/alloca-load-store.ll
+++ b/llvm/test/CodeGen/SPIRV/function/alloca-load-store.ll
@@ -12,8 +12,8 @@
 
 define i32 @bar(i32 %a) {
   %p = alloca i32
-  store i32 %a, i32* %p
-  %b = load i32, i32* %p
+  store i32 %a, ptr %p
+  %b = load i32, ptr %p
   ret i32 %b
 }
 
@@ -29,8 +29,8 @@ define i32 @bar(i32 %a) {
 
 define i32 @foo(i32 %a) {
   %p = alloca i32
-  store volatile i32 %a, i32* %p
-  %b = load volatile i32, i32* %p
+  store volatile i32 %a, ptr %p
+  %b = load volatile i32, ptr %p
   ret i32 %b
 }
 
@@ -46,8 +46,8 @@ define i32 @foo(i32 %a) {
 
 ;; Test load and store in global address space.
 define i32 @goo(i32 %a, ptr addrspace(1) %p) {
-  store i32 %a, i32 addrspace(1)* %p
-  %b = load i32, i32 addrspace(1)* %p
+  store i32 %a, ptr addrspace(1) %p
+  %b = load i32, ptr addrspace(1) %p
   ret i32 %b
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/half_extension.ll b/llvm/test/CodeGen/SPIRV/half_extension.ll
index b30e5514c95be..02f6dc6eda6f9 100644
--- a/llvm/test/CodeGen/SPIRV/half_extension.ll
+++ b/llvm/test/CodeGen/SPIRV/half_extension.ll
@@ -16,16 +16,16 @@ define spir_func half @test() {
 entry:
   %x = alloca half, align 2
   %y = alloca half, align 2
-  store half 0xH2E66, half* %x, align 2
-  %0 = load half, half* %x, align 2
+  store half 0xH2E66, ptr %x, align 2
+  %0 = load half, ptr %x, align 2
   %conv = fpext half %0 to float
   %add = fadd float %conv, 2.000000e+00
   %conv1 = fptrunc float %add to half
-  store half %conv1, half* %x, align 2
-  %1 = load half, half* %x, align 2
-  %2 = load half, half* %x, align 2
+  store half %conv1, ptr %x, align 2
+  %1 = load half, ptr %x, align 2
+  %2 = load half, ptr %x, align 2
   %add2 = fadd half %1, %2
-  store half %add2, half* %y, align 2
-  %3 = load half, half* %y, align 2
+  store half %add2, ptr %y, align 2
+  %3 = load half, ptr %y, align 2
   ret half %3
 }
diff --git a/llvm/test/CodeGen/SPIRV/half_no_extension.ll b/llvm/test/CodeGen/SPIRV/half_no_extension.ll
index 63cecbc7399ae..ce13f4609fe11 100644
--- a/llvm/test/CodeGen/SPIRV/half_no_extension.ll
+++ b/llvm/test/CodeGen/SPIRV/half_no_extension.ll
@@ -11,21 +11,21 @@
 ; CHECK-DAG: OpCapability Float16Buffer
 ; CHECK-DAG: OpCapability Float16
 
-define spir_kernel void @test(<4 x float> addrspace(1)* %p, half addrspace(1)* %f) {
+define spir_kernel void @test(ptr addrspace(1) %p, ptr addrspace(1) %f) {
 entry:
-  %p.addr = alloca <4 x float> addrspace(1)*, align 8
-  %f.addr = alloca half addrspace(1)*, align 8
+  %p.addr = alloca ptr addrspace(1), align 8
+  %f.addr = alloca ptr addrspace(1), align 8
   %data = alloca <4 x float>, align 16
-  store <4 x float> addrspace(1)* %p, <4 x float> addrspace(1)** %p.addr, align 8
-  store half addrspace(1)* %f, half addrspace(1)** %f.addr, align 8
-  %0 = load <4 x float> addrspace(1)*, <4 x float> addrspace(1)** %p.addr, align 8
-  %arrayidx = getelementptr inbounds <4 x float>, <4 x float> addrspace(1)* %0, i64 0
-  %1 = load <4 x float>, <4 x float> addrspace(1)* %arrayidx, align 16
-  store <4 x float> %1, <4 x float>* %data, align 16
-  %2 = load <4 x float>, <4 x float>* %data, align 16
-  %3 = load half addrspace(1)*, half addrspace(1)** %f.addr, align 8
-  call spir_func void @_Z17vstorea_half4_rtpDv4_fmPU3AS1Dh(<4 x float> %2, i64 0, half addrspace(1)* %3)
+  store ptr addrspace(1) %p, ptr %p.addr, align 8
+  store ptr addrspace(1) %f, ptr %f.addr, align 8
+  %0 = load ptr addrspace(1), ptr %p.addr, align 8
+  %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(1) %0, i64 0
+  %1 = load <4 x float>, ptr addrspace(1) %arrayidx, align 16
+  store <4 x float> %1, ptr %data, align 16
+  %2 = load <4 x float>, ptr %data, align 16
+  %3 = load ptr addrspace(1), ptr %f.addr, align 8
+  call spir_func void @_Z17vstorea_half4_rtpDv4_fmPU3AS1Dh(<4 x float> %2, i64 0, ptr addrspace(1) %3)
   ret void
 }
 
-declare spir_func void @_Z17vstorea_half4_rtpDv4_fmPU3AS1Dh(<4 x float>, i64, half addrspace(1)*)
+declare spir_func void @_Z17vstorea_half4_rtpDv4_fmPU3AS1Dh(<4 x float>, i64, ptr addrspace(1))
diff --git a/llvm/test/CodeGen/SPIRV/image-unoptimized.ll b/llvm/test/CodeGen/SPIRV/image-unoptimized.ll
index d7d5b1d6b7562..afbac3e95caf4 100644
--- a/llvm/test/CodeGen/SPIRV/image-unoptimized.ll
+++ b/llvm/test/CodeGen/SPIRV/image-unoptimized.ll
@@ -43,39 +43,39 @@
 ;;   results[tid_x + tid_y * get_image_width(srcimg)] = read_imagef(srcimg, sampler, (int2){tid_x, tid_y});
 ;; }
 
-define dso_local spir_kernel void @test_fn(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %srcimg, target("spirv.Sampler") %sampler, <4 x float> addrspace(1)* noundef %results) {
+define dso_local spir_kernel void @test_fn(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %srcimg, target("spirv.Sampler") %sampler, ptr addrspace(1) noundef %results) {
 entry:
   %srcimg.addr = alloca target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0), align 4
   %sampler.addr = alloca target("spirv.Sampler"), align 4
-  %results.addr = alloca <4 x float> addrspace(1)*, align 4
+  %results.addr = alloca ptr addrspace(1), align 4
   %tid_x = alloca i32, align 4
   %tid_y = alloca i32, align 4
   %.compoundliteral = alloca <2 x i32>, align 8
   store target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %srcimg, target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0)* %srcimg.addr, align 4
   store target("spirv.Sampler") %sampler, target("spirv.Sampler")* %sampler.addr, align 4
-  store <4 x float> addrspace(1)* %results, <4 x float> addrspace(1)** %results.addr, align 4
+  store ptr addrspace(1) %results, ptr %results.addr, align 4
   %call = call spir_func i32 @_Z13get_global_idj(i32 noundef 0)
-  store i32 %call, i32* %tid_x, align 4
+  store i32 %call, ptr %tid_x, align 4
   %call1 = call spir_func i32 @_Z13get_global_idj(i32 noundef 1)
-  store i32 %call1, i32* %tid_y, align 4
+  store i32 %call1, ptr %tid_y, align 4
   %0 = load target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0), target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0)* %srcimg.addr, align 4
   %1 = load target("spirv.Sampler"), target("spirv.Sampler")* %sampler.addr, align 4
-  %2 = load i32, i32* %tid_x, align 4
+  %2 = load i32, ptr %tid_x, align 4
   %vecinit = insertelement <2 x i32> undef, i32 %2, i32 0
-  %3 = load i32, i32* %tid_y, align 4
+  %3 = load i32, ptr %tid_y, align 4
   %vecinit2 = insertelement <2 x i32> %vecinit, i32 %3, i32 1
-  store <2 x i32> %vecinit2, <2 x i32>* %.compoundliteral, align 8
-  %4 = load <2 x i32>, <2 x i32>* %.compoundliteral, align 8
+  store <2 x i32> %vecinit2, ptr %.compoundliteral, align 8
+  %4 = load <2 x i32>, ptr %.compoundliteral, align 8
   %call3 = call spir_func <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_i(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %0, target("spirv.Sampler") %1, <2 x i32> noundef %4)
-  %5 = load <4 x float> addrspace(1)*, <4 x float> addrspace(1)** %results.addr, align 4
-  %6 = load i32, i32* %tid_x, align 4
-  %7 = load i32, i32* %tid_y, align 4
+  %5 = load ptr addrspace(1), ptr %results.addr, align 4
+  %6 = load i32, ptr %tid_x, align 4
+  %7 = load i32, ptr %tid_y, align 4
   %8 = load target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0), target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0)* %srcimg.addr, align 4
   %call4 = call spir_func i32 @_Z15get_image_width14ocl_image2d_ro(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %8)
   %mul = mul nsw i32 %7, %call4
   %add = add nsw i32 %6, %mul
-  %arrayidx = getelementptr inbounds <4 x float>, <4 x float> addrspace(1)* %5, i32 %add
-  store <4 x float> %call3, <4 x float> addrspace(1)* %arrayidx, align 16
+  %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(1) %5, i32 %add
+  store <4 x float> %call3, ptr addrspace(1) %arrayidx, align 16
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
index f4e7b128f77a3..7edd65538847c 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic.ll
@@ -29,8 +29,8 @@
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_add(i32* %ptr, i32 %val) {
-  %r = atomicrmw add i32* %ptr, i32 %val monotonic
+define i32 @test_add(ptr %ptr, i32 %val) {
+  %r = atomicrmw add ptr %ptr, i32 %val monotonic
   ret i32 %r
 }
 
@@ -41,8 +41,8 @@ define i32 @test_add(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_sub(i32* %ptr, i32 %val) {
-  %r = atomicrmw sub i32* %ptr, i32 %val monotonic
+define i32 @test_sub(ptr %ptr, i32 %val) {
+  %r = atomicrmw sub ptr %ptr, i32 %val monotonic
   ret i32 %r
 }
 
@@ -53,8 +53,8 @@ define i32 @test_sub(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_min(i32* %ptr, i32 %val) {
-  %r = atomicrmw min i32* %ptr, i32 %val monotonic
+define i32 @test_min(ptr %ptr, i32 %val) {
+  %r = atomicrmw min ptr %ptr, i32 %val monotonic
   ret i32 %r
 }
 
@@ -65,8 +65,8 @@ define i32 @test_min(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_max(i32* %ptr, i32 %val) {
-  %r = atomicrmw max i32* %ptr, i32 %val monotonic
+define i32 @test_max(ptr %ptr, i32 %val) {
+  %r = atomicrmw max ptr %ptr, i32 %val monotonic
   ret i32 %r
 }
 
@@ -77,8 +77,8 @@ define i32 @test_max(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_umin(i32* %ptr, i32 %val) {
-  %r = atomicrmw umin i32* %ptr, i32 %val monotonic
+define i32 @test_umin(ptr %ptr, i32 %val) {
+  %r = atomicrmw umin ptr %ptr, i32 %val monotonic
   ret i32 %r
 }
 
@@ -89,8 +89,8 @@ define i32 @test_umin(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_umax(i32* %ptr, i32 %val) {
-  %r = atomicrmw umax i32* %ptr, i32 %val monotonic
+define i32 @test_umax(ptr %ptr, i32 %val) {
+  %r = atomicrmw umax ptr %ptr, i32 %val monotonic
   ret i32 %r
 }
 
@@ -101,8 +101,8 @@ define i32 @test_umax(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_and(i32* %ptr, i32 %val) {
-  %r = atomicrmw and i32* %ptr, i32 %val monotonic
+define i32 @test_and(ptr %ptr, i32 %val) {
+  %r = atomicrmw and ptr %ptr, i32 %val monotonic
   ret i32 %r
 }
 
@@ -113,8 +113,8 @@ define i32 @test_and(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_or(i32* %ptr, i32 %val) {
-  %r = atomicrmw or i32* %ptr, i32 %val monotonic
+define i32 @test_or(ptr %ptr, i32 %val) {
+  %r = atomicrmw or ptr %ptr, i32 %val monotonic
   ret i32 %r
 }
 
@@ -125,8 +125,8 @@ define i32 @test_or(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[A]] [[CROSSDEVICESCOPE]] {{.+}} [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_xor(i32* %ptr, i32 %val) {
-  %r = atomicrmw xor i32* %ptr, i32 %val monotonic
+define i32 @test_xor(ptr %ptr, i32 %val) {
+  %r = atomicrmw xor ptr %ptr, i32 %val monotonic
   ret i32 %r
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
index 4d5aca6d404de..7842ee0c38811 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic_acqrel.ll
@@ -25,8 +25,8 @@
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_add(i32* %ptr, i32 %val) {
-  %r = atomicrmw add i32* %ptr, i32 %val acq_rel
+define i32 @test_add(ptr %ptr, i32 %val) {
+  %r = atomicrmw add ptr %ptr, i32 %val acq_rel
   ret i32 %r
 }
 
@@ -37,8 +37,8 @@ define i32 @test_add(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_sub(i32* %ptr, i32 %val) {
-  %r = atomicrmw sub i32* %ptr, i32 %val acq_rel
+define i32 @test_sub(ptr %ptr, i32 %val) {
+  %r = atomicrmw sub ptr %ptr, i32 %val acq_rel
   ret i32 %r
 }
 
@@ -49,8 +49,8 @@ define i32 @test_sub(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_min(i32* %ptr, i32 %val) {
-  %r = atomicrmw min i32* %ptr, i32 %val acq_rel
+define i32 @test_min(ptr %ptr, i32 %val) {
+  %r = atomicrmw min ptr %ptr, i32 %val acq_rel
   ret i32 %r
 }
 
@@ -61,8 +61,8 @@ define i32 @test_min(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_max(i32* %ptr, i32 %val) {
-  %r = atomicrmw max i32* %ptr, i32 %val acq_rel
+define i32 @test_max(ptr %ptr, i32 %val) {
+  %r = atomicrmw max ptr %ptr, i32 %val acq_rel
   ret i32 %r
 }
 
@@ -73,8 +73,8 @@ define i32 @test_max(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_umin(i32* %ptr, i32 %val) {
-  %r = atomicrmw umin i32* %ptr, i32 %val acq_rel
+define i32 @test_umin(ptr %ptr, i32 %val) {
+  %r = atomicrmw umin ptr %ptr, i32 %val acq_rel
   ret i32 %r
 }
 
@@ -85,8 +85,8 @@ define i32 @test_umin(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_umax(i32* %ptr, i32 %val) {
-  %r = atomicrmw umax i32* %ptr, i32 %val acq_rel
+define i32 @test_umax(ptr %ptr, i32 %val) {
+  %r = atomicrmw umax ptr %ptr, i32 %val acq_rel
   ret i32 %r
 }
 
@@ -97,8 +97,8 @@ define i32 @test_umax(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_and(i32* %ptr, i32 %val) {
-  %r = atomicrmw and i32* %ptr, i32 %val acq_rel
+define i32 @test_and(ptr %ptr, i32 %val) {
+  %r = atomicrmw and ptr %ptr, i32 %val acq_rel
   ret i32 %r
 }
 
@@ -109,8 +109,8 @@ define i32 @test_and(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_or(i32* %ptr, i32 %val) {
-  %r = atomicrmw or i32* %ptr, i32 %val acq_rel
+define i32 @test_or(ptr %ptr, i32 %val) {
+  %r = atomicrmw or ptr %ptr, i32 %val acq_rel
   ret i32 %r
 }
 
@@ -121,7 +121,7 @@ define i32 @test_or(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[A]] [[SCOPE]] [[ACQREL]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_xor(i32* %ptr, i32 %val) {
-  %r = atomicrmw xor i32* %ptr, i32 %val acq_rel
+define i32 @test_xor(ptr %ptr, i32 %val) {
+  %r = atomicrmw xor ptr %ptr, i32 %val acq_rel
   ret i32 %r
 }
diff --git a/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll b/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
index 9fd3d8e95b5f1..df0a365f097d4 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/atomic_seq.ll
@@ -25,8 +25,8 @@
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicIAdd [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_add(i32* %ptr, i32 %val) {
-  %r = atomicrmw add i32* %ptr, i32 %val seq_cst
+define i32 @test_add(ptr %ptr, i32 %val) {
+  %r = atomicrmw add ptr %ptr, i32 %val seq_cst
   ret i32 %r
 }
 
@@ -37,8 +37,8 @@ define i32 @test_add(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicISub [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_sub(i32* %ptr, i32 %val) {
-  %r = atomicrmw sub i32* %ptr, i32 %val seq_cst
+define i32 @test_sub(ptr %ptr, i32 %val) {
+  %r = atomicrmw sub ptr %ptr, i32 %val seq_cst
   ret i32 %r
 }
 
@@ -49,8 +49,8 @@ define i32 @test_sub(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicSMin [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_min(i32* %ptr, i32 %val) {
-  %r = atomicrmw min i32* %ptr, i32 %val seq_cst
+define i32 @test_min(ptr %ptr, i32 %val) {
+  %r = atomicrmw min ptr %ptr, i32 %val seq_cst
   ret i32 %r
 }
 
@@ -61,8 +61,8 @@ define i32 @test_min(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicSMax [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_max(i32* %ptr, i32 %val) {
-  %r = atomicrmw max i32* %ptr, i32 %val seq_cst
+define i32 @test_max(ptr %ptr, i32 %val) {
+  %r = atomicrmw max ptr %ptr, i32 %val seq_cst
   ret i32 %r
 }
 
@@ -73,8 +73,8 @@ define i32 @test_max(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicUMin [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_umin(i32* %ptr, i32 %val) {
-  %r = atomicrmw umin i32* %ptr, i32 %val seq_cst
+define i32 @test_umin(ptr %ptr, i32 %val) {
+  %r = atomicrmw umin ptr %ptr, i32 %val seq_cst
   ret i32 %r
 }
 
@@ -85,8 +85,8 @@ define i32 @test_umin(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicUMax [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_umax(i32* %ptr, i32 %val) {
-  %r = atomicrmw umax i32* %ptr, i32 %val seq_cst
+define i32 @test_umax(ptr %ptr, i32 %val) {
+  %r = atomicrmw umax ptr %ptr, i32 %val seq_cst
   ret i32 %r
 }
 
@@ -97,8 +97,8 @@ define i32 @test_umax(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicAnd [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_and(i32* %ptr, i32 %val) {
-  %r = atomicrmw and i32* %ptr, i32 %val seq_cst
+define i32 @test_and(ptr %ptr, i32 %val) {
+  %r = atomicrmw and ptr %ptr, i32 %val seq_cst
   ret i32 %r
 }
 
@@ -109,8 +109,8 @@ define i32 @test_and(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicOr [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_or(i32* %ptr, i32 %val) {
-  %r = atomicrmw or i32* %ptr, i32 %val seq_cst
+define i32 @test_or(ptr %ptr, i32 %val) {
+  %r = atomicrmw or ptr %ptr, i32 %val seq_cst
   ret i32 %r
 }
 
@@ -121,7 +121,7 @@ define i32 @test_or(i32* %ptr, i32 %val) {
 ; CHECK-NEXT: [[R:%.*]] = OpAtomicXor [[I32Ty]] [[A]] [[SCOPE]] [[SEQ]] [[B]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i32 @test_xor(i32* %ptr, i32 %val) {
-  %r = atomicrmw xor i32* %ptr, i32 %val seq_cst
+define i32 @test_xor(ptr %ptr, i32 %val) {
+  %r = atomicrmw xor ptr %ptr, i32 %val seq_cst
   ret i32 %r
 }
diff --git a/llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll b/llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll
index 792f7b9c194ba..f11e53577c10f 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/bitwise-i1.ll
@@ -25,23 +25,23 @@
 ; CHECK: OpLogicalOr %[[#Vec2Bool]]
 ; CHECK: OpLogicalNotEqual %[[#Vec2Bool]]
 
-define void @test1(i8 noundef %arg1, i8 noundef %arg2, i8 addrspace(1)* %out) {
+define void @test1(i8 noundef %arg1, i8 noundef %arg2, ptr addrspace(1) %out) {
   %cond1 = and i8 %arg1, %arg2
-  store volatile i8 %cond1, i8 addrspace(1)* %out
+  store volatile i8 %cond1, ptr addrspace(1) %out
   %cond2 = or i8 %arg1, %arg2
-  store volatile i8 %cond2, i8 addrspace(1)* %out
+  store volatile i8 %cond2, ptr addrspace(1) %out
   %cond3 = xor i8 %arg1, %arg2
-  store volatile i8 %cond3, i8 addrspace(1)* %out
+  store volatile i8 %cond3, ptr addrspace(1) %out
   ret void
 }
 
-define void @test1v(<2 x i8> noundef %arg1, <2 x i8> noundef %arg2, <2 x i8> addrspace(1)* %out) {
+define void @test1v(<2 x i8> noundef %arg1, <2 x i8> noundef %arg2, ptr addrspace(1) %out) {
   %cond1 = and <2 x i8> %arg1, %arg2
-  store volatile <2 x i8> %cond1, <2 x i8> addrspace(1)* %out
+  store volatile <2 x i8> %cond1, ptr addrspace(1) %out
   %cond2 = or <2 x i8> %arg1, %arg2
-  store volatile <2 x i8> %cond2, <2 x i8> addrspace(1)* %out
+  store volatile <2 x i8> %cond2, ptr addrspace(1) %out
   %cond3 = xor <2 x i8> %arg1, %arg2
-  store volatile <2 x i8> %cond3, <2 x i8> addrspace(1)* %out
+  store volatile <2 x i8> %cond3, ptr addrspace(1) %out
   ret void
 }
 
@@ -58,23 +58,23 @@ cleanup:
   ret void
 }
 
-define void @test3(i1 noundef %arg1, i1 noundef %arg2, i1 addrspace(1)* %out) {
+define void @test3(i1 noundef %arg1, i1 noundef %arg2, ptr addrspace(1) %out) {
   %cond1 = and i1 %arg1, %arg2
-  store volatile i1 %cond1, i1 addrspace(1)* %out
+  store volatile i1 %cond1, ptr addrspace(1) %out
   %cond2 = or i1 %arg1, %arg2
-  store volatile i1 %cond2, i1 addrspace(1)* %out
+  store volatile i1 %cond2, ptr addrspace(1) %out
   %cond3 = xor i1 %arg1, %arg2
-  store volatile i1 %cond3, i1 addrspace(1)* %out
+  store volatile i1 %cond3, ptr addrspace(1) %out
   ret void
 }
 
-define void @test3v(<2 x i1> noundef %arg1, <2 x i1> noundef %arg2, <2 x i1> addrspace(1)* %out) {
+define void @test3v(<2 x i1> noundef %arg1, <2 x i1> noundef %arg2, ptr addrspace(1) %out) {
   %cond1 = and <2 x i1> %arg1, %arg2
-  store volatile <2 x i1> %cond1, <2 x i1> addrspace(1)* %out
+  store volatile <2 x i1> %cond1, ptr addrspace(1) %out
   %cond2 = or <2 x i1> %arg1, %arg2
-  store volatile <2 x i1> %cond2, <2 x i1> addrspace(1)* %out
+  store volatile <2 x i1> %cond2, ptr addrspace(1) %out
   %cond3 = xor <2 x i1> %arg1, %arg2
-  store volatile <2 x i1> %cond3, <2 x i1> addrspace(1)* %out
+  store volatile <2 x i1> %cond3, ptr addrspace(1) %out
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll b/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
index d075007777ddf..bc70210c26bf3 100644
--- a/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
+++ b/llvm/test/CodeGen/SPIRV/instructions/ptrcmp.ll
@@ -28,8 +28,8 @@
 ; CHECK-COMPAT: [[EQ]] = OpFunction
 ; CHECK-COMPAT-NOT: OpPtrEqual
 ; CHECK-COMPAT: OpFunctionEnd
-define i1 @test_eq(i16* %a, i16* %b) {
-  %r = icmp eq i16* %a, %b
+define i1 @test_eq(ptr %a, ptr %b) {
+  %r = icmp eq ptr %a, %b
   ret i1 %r
 }
 
@@ -43,8 +43,8 @@ define i1 @test_eq(i16* %a, i16* %b) {
 ; CHECK-COMPAT: [[NE]] = OpFunction
 ; CHECK-COMPAT-NOT: OpPtrNotEqual
 ; CHECK-COMPAT: OpFunctionEnd
-define i1 @test_ne(i16* %a, i16* %b) {
-  %r = icmp ne i16* %a, %b
+define i1 @test_ne(ptr %a, ptr %b) {
+  %r = icmp ne ptr %a, %b
   ret i1 %r
 }
 
@@ -57,8 +57,8 @@ define i1 @test_ne(i16* %a, i16* %b) {
 ; CHECK:      [[R:%.*]] = OpSLessThan {{%.+}} [[AI]] [[BI]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i1 @test_slt(i16* %a, i16* %b) {
-  %r = icmp slt i16* %a, %b
+define i1 @test_slt(ptr %a, ptr %b) {
+  %r = icmp slt ptr %a, %b
   ret i1 %r
 }
 
@@ -71,8 +71,8 @@ define i1 @test_slt(i16* %a, i16* %b) {
 ; CHECK:      [[R:%.*]] = OpULessThan {{%.+}} [[AI]] [[BI]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i1 @test_ult(i16* %a, i16* %b) {
-  %r = icmp ult i16* %a, %b
+define i1 @test_ult(ptr %a, ptr %b) {
+  %r = icmp ult ptr %a, %b
   ret i1 %r
 }
 
@@ -85,8 +85,8 @@ define i1 @test_ult(i16* %a, i16* %b) {
 ; CHECK:      [[R:%.*]] = OpULessThanEqual {{%.+}} [[AI]] [[BI]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i1 @test_ule(i16* %a, i16* %b) {
-  %r = icmp ule i16* %a, %b
+define i1 @test_ule(ptr %a, ptr %b) {
+  %r = icmp ule ptr %a, %b
   ret i1 %r
 }
 
@@ -99,8 +99,8 @@ define i1 @test_ule(i16* %a, i16* %b) {
 ; CHECK:      [[R:%.*]] = OpSLessThanEqual {{%.+}} [[AI]] [[BI]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i1 @test_sle(i16* %a, i16* %b) {
-  %r = icmp sle i16* %a, %b
+define i1 @test_sle(ptr %a, ptr %b) {
+  %r = icmp sle ptr %a, %b
   ret i1 %r
 }
 
@@ -113,8 +113,8 @@ define i1 @test_sle(i16* %a, i16* %b) {
 ; CHECK:      [[R:%.*]] = OpUGreaterThan {{%.+}} [[AI]] [[BI]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i1 @test_ugt(i16* %a, i16* %b) {
-  %r = icmp ugt i16* %a, %b
+define i1 @test_ugt(ptr %a, ptr %b) {
+  %r = icmp ugt ptr %a, %b
   ret i1 %r
 }
 
@@ -127,8 +127,8 @@ define i1 @test_ugt(i16* %a, i16* %b) {
 ; CHECK:      [[R:%.*]] = OpSGreaterThan {{%.+}} [[AI]] [[BI]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i1 @test_sgt(i16* %a, i16* %b) {
-  %r = icmp sgt i16* %a, %b
+define i1 @test_sgt(ptr %a, ptr %b) {
+  %r = icmp sgt ptr %a, %b
   ret i1 %r
 }
 
@@ -141,8 +141,8 @@ define i1 @test_sgt(i16* %a, i16* %b) {
 ; CHECK:      [[R:%.*]] = OpUGreaterThanEqual {{%.+}} [[AI]] [[BI]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i1 @test_uge(i16* %a, i16* %b) {
-  %r = icmp uge i16* %a, %b
+define i1 @test_uge(ptr %a, ptr %b) {
+  %r = icmp uge ptr %a, %b
   ret i1 %r
 }
 
@@ -155,7 +155,7 @@ define i1 @test_uge(i16* %a, i16* %b) {
 ; CHECK:      [[R:%.*]] = OpSGreaterThanEqual {{%.+}} [[AI]] [[BI]]
 ; CHECK-NEXT: OpReturnValue [[R]]
 ; CHECK-NEXT: OpFunctionEnd
-define i1 @test_sge(i16* %a, i16* %b) {
-  %r = icmp sge i16* %a, %b
+define i1 @test_sge(ptr %a, ptr %b) {
+  %r = icmp sge ptr %a, %b
   ret i1 %r
 }
diff --git a/llvm/test/CodeGen/SPIRV/linkage/linkage-types.ll b/llvm/test/CodeGen/SPIRV/linkage/linkage-types.ll
index e941768204ba8..b1c48db1f209f 100644
--- a/llvm/test/CodeGen/SPIRV/linkage/linkage-types.ll
+++ b/llvm/test/CodeGen/SPIRV/linkage/linkage-types.ll
@@ -73,16 +73,16 @@ define spir_func void @f() {
 entry:
   %q = alloca i32, align 4
   %r = alloca i32, align 4
-  %0 = load i32, i32 addrspace(1)* @i2, align 4
-  store i32 %0, i32* %q, align 4
-  %1 = load i32, i32 addrspace(1)* @i3, align 4
-  store i32 %1, i32 addrspace(1)* @i5, align 4
-  %2 = load i32, i32 addrspace(1)* @e, align 4
-  store i32 %2, i32* %r, align 4
-  %3 = load i32, i32 addrspace(2)* getelementptr inbounds ([256 x i32], [256 x i32] addrspace(2)* @noise_table, i32 0, i32 0), align 4
-  store i32 %3, i32* %r, align 4
-  %4 = load i32, i32 addrspace(2)* getelementptr inbounds ([2 x i32], [2 x i32] addrspace(2)* @f.color_table, i32 0, i32 0), align 4
-  store i32 %4, i32* %r, align 4
+  %0 = load i32, ptr addrspace(1) @i2, align 4
+  store i32 %0, ptr %q, align 4
+  %1 = load i32, ptr addrspace(1) @i3, align 4
+  store i32 %1, ptr addrspace(1) @i5, align 4
+  %2 = load i32, ptr addrspace(1) @e, align 4
+  store i32 %2, ptr %r, align 4
+  %3 = load i32, ptr addrspace(2) @noise_table, align 4
+  store i32 %3, ptr %r, align 4
+  %4 = load i32, ptr addrspace(2) @f.color_table, align 4
+  store i32 %4, ptr %r, align 4
   %call = call spir_func i32 @g()
   call spir_func void @inline_fun()
   ret void
@@ -99,8 +99,8 @@ entry:
 ;; "linkonce_odr" is lost in translation !
 define linkonce_odr spir_func void @inline_fun() {
 entry:
-  %t = alloca i32 addrspace(1)*, align 4
-  store i32 addrspace(1)* @i1, i32 addrspace(1)** %t, align 4
+  %t = alloca ptr addrspace(1), align 4
+  store ptr addrspace(1) @i1, ptr %t, align 4
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/linked-list.ll b/llvm/test/CodeGen/SPIRV/linked-list.ll
index be7634f8a57a2..de55654a3ff47 100644
--- a/llvm/test/CodeGen/SPIRV/linked-list.ll
+++ b/llvm/test/CodeGen/SPIRV/linked-list.ll
@@ -3,11 +3,11 @@
 ; TODO(#60133): Requires updates following opaque pointer migration.
 ; XFAIL: *
 
-%struct.Node = type { %struct.Node.0 addrspace(1)* }
+%struct.Node = type { ptr addrspace(1) }
 ; CHECK: %[[#]] = OpTypeOpaque "struct.Node.0"
 %struct.Node.0 = type opaque
 
-define spir_kernel void @create_linked_lists(%struct.Node addrspace(1)* nocapture %pNodes, i32 addrspace(1)* nocapture %allocation_index, i32 %list_length) {
+define spir_kernel void @create_linked_lists(ptr addrspace(1) nocapture %pNodes, ptr addrspace(1) nocapture %allocation_index, i32 %list_length) {
 entry:
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/abs.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/abs.ll
index f74c6ef99b455..8fe0d1e57b3b9 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/abs.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/abs.ll
@@ -13,9 +13,9 @@
 define dso_local spir_kernel void @test(i32 %a, <4 x i32> %b) local_unnamed_addr {
 entry:
   %0 = tail call i32 @llvm.abs.i32(i32 %a, i1 0)
-  store i32 %0, i32 addrspace(1)* @ga, align 4
+  store i32 %0, ptr addrspace(1) @ga, align 4
   %1 = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %b, i1 0)
-  store <4 x i32> %1, <4 x i32> addrspace(1)* @gb, align 4
+  store <4 x i32> %1, ptr addrspace(1) @gb, align 4
 
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll
index 32a86ed13b4ff..7bfb9eb065f7c 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/assume.ll
@@ -9,7 +9,7 @@
 define spir_func i32 @_Z3fooi(i32 %x) {
 entry:
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
   %0 = load i32, ptr %x.addr, align 4
   %cmp = icmp ne i32 %0, 0
   call void @llvm.assume(i1 %cmp)
@@ -23,33 +23,33 @@ define i32 @main() {
 entry:
   %retval = alloca i32, align 4
   %agg.tmp = alloca %class.anon, align 1
-  store i32 0, i32* %retval, align 4
-  call spir_func void @"_Z18kernel_single_taskIZ4mainE11fake_kernelZ4mainE3$_0EvT0_"(%class.anon* byval(%class.anon) align 1 %agg.tmp)
+  store i32 0, ptr %retval, align 4
+  call spir_func void @"_Z18kernel_single_taskIZ4mainE11fake_kernelZ4mainE3$_0EvT0_"(ptr byval(%class.anon) align 1 %agg.tmp)
   ret i32 0
 }
 
-define internal spir_func void @"_Z18kernel_single_taskIZ4mainE11fake_kernelZ4mainE3$_0EvT0_"(%class.anon* byval(%class.anon) align 1 %kernelFunc) {
+define internal spir_func void @"_Z18kernel_single_taskIZ4mainE11fake_kernelZ4mainE3$_0EvT0_"(ptr byval(%class.anon) align 1 %kernelFunc) {
 entry:
-  call spir_func void @"_ZZ4mainENK3$_0clEv"(%class.anon* %kernelFunc)
+  call spir_func void @"_ZZ4mainENK3$_0clEv"(ptr %kernelFunc)
   ret void
 }
 
-define internal spir_func void @"_ZZ4mainENK3$_0clEv"(%class.anon* %this) align 2 {
+define internal spir_func void @"_ZZ4mainENK3$_0clEv"(ptr %this) align 2 {
 entry:
-  %this.addr = alloca %class.anon*, align 8
+  %this.addr = alloca ptr, align 8
   %a = alloca i32, align 4
-  store %class.anon* %this, %class.anon** %this.addr, align 8
-  %this1 = load %class.anon*, %class.anon** %this.addr, align 8
-  %0 = bitcast i32* %a to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
-  store i32 1, i32* %a, align 4
-  %1 = load i32, i32* %a, align 4
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
+  %0 = bitcast ptr %a to ptr
+  call void @llvm.lifetime.start.p0(i64 4, ptr %0)
+  store i32 1, ptr %a, align 4
+  %1 = load i32, ptr %a, align 4
   %2 = call spir_func i32 @_Z3fooi(i32 %1)
-  %3 = bitcast i32* %a to i8*
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %3)
+  %3 = bitcast ptr %a to ptr
+  call void @llvm.lifetime.end.p0(i64 4, ptr %3)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll
index d4b1592a044bc..fb5b9d0c38494 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bitreverse_small_type.ll
@@ -52,54 +52,54 @@
 define spir_kernel void @testBitRev() {
 entry:
   %call2 = call i2 @llvm.bitreverse.i2(i2 0)
-  store i2 %call2, i2* @G_i2_res
+  store i2 %call2, ptr @G_i2_res
   %call4 = call i4 @llvm.bitreverse.i4(i4 0)
-  store i4 %call4, i4* @G_i4_res
+  store i4 %call4, ptr @G_i4_res
   ret void
 }
 
 define spir_kernel void @testBitRevV2(<2 x i2> %a, <2 x i4> %b) {
 entry:
   %call2 = call <2 x i2> @llvm.bitreverse.v2i2(<2 x i2> %a)
-  store <2 x i2> %call2, <2 x i2>* @G_v2i2_res
+  store <2 x i2> %call2, ptr @G_v2i2_res
   %call4 = call <2 x i4> @llvm.bitreverse.v2i4(<2 x i4> %b)
-  store <2 x i4> %call4, <2 x i4>* @G_v2i4_res
+  store <2 x i4> %call4, ptr @G_v2i4_res
   ret void
 }
 
 define spir_kernel void @testBitRevV3(<3 x i2> %a, <3 x i4> %b) {
 entry:
   %call2 = call <3 x i2> @llvm.bitreverse.v3i2(<3 x i2> %a)
-  store <3 x i2> %call2, <3 x i2>* @G_v3i2_res
+  store <3 x i2> %call2, ptr @G_v3i2_res
   %call4 = call <3 x i4> @llvm.bitreverse.v3i4(<3 x i4> %b)
-  store <3 x i4> %call4, <3 x i4>* @G_v3i4_res
+  store <3 x i4> %call4, ptr @G_v3i4_res
   ret void
 }
 
 define spir_kernel void @testBitRevV4(<4 x i2> %a, <4 x i4> %b) {
 entry:
   %call2 = call <4 x i2> @llvm.bitreverse.v4i2(<4 x i2> %a)
-  store <4 x i2> %call2, <4 x i2>* @G_v4i2_res
+  store <4 x i2> %call2, ptr @G_v4i2_res
   %call4 = call <4 x i4> @llvm.bitreverse.v4i4(<4 x i4> %b)
-  store <4 x i4> %call4, <4 x i4>* @G_v4i4_res
+  store <4 x i4> %call4, ptr @G_v4i4_res
   ret void
 }
 
 define spir_kernel void @testBitRevV8(<8 x i2> %a, <8 x i4> %b) {
 entry:
   %call2 = call <8 x i2> @llvm.bitreverse.v8i2(<8 x i2> %a)
-  store <8 x i2> %call2, <8 x i2>* @G_v8i2_res
+  store <8 x i2> %call2, ptr @G_v8i2_res
   %call4 = call <8 x i4> @llvm.bitreverse.v8i4(<8 x i4> %b)
-  store <8 x i4> %call4, <8 x i4>* @G_v8i4_res
+  store <8 x i4> %call4, ptr @G_v8i4_res
   ret void
 }
 
 define spir_kernel void @testBitRevV16(<16 x i2> %a, <16 x i4> %b) {
 entry:
   %call2 = call <16 x i2> @llvm.bitreverse.v16i2(<16 x i2> %a)
-  store <16 x i2> %call2, <16 x i2>* @G_v16i2_res
+  store <16 x i2> %call2, ptr @G_v16i2_res
   %call4 = call <16 x i4> @llvm.bitreverse.v16i4(<16 x i4> %b)
-  store <16 x i4> %call4, <16 x i4>* @G_v16i4_res
+  store <16 x i4> %call4, ptr @G_v16i4_res
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bswap.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bswap.ll
index 0ec99a602e4b0..4593edd84a9dd 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bswap.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/bswap.ll
@@ -47,23 +47,23 @@ entry:
   %d = alloca i32, align 4
   %e = alloca i64, align 8
   %f = alloca i64, align 8
-  store i32 0, i32* %retval, align 4
-  store i16 258, i16* %a, align 2
-  %0 = load i16, i16* %a, align 2
+  store i32 0, ptr %retval, align 4
+  store i16 258, ptr %a, align 2
+  %0 = load i16, ptr %a, align 2
   %1 = call i16 @llvm.bswap.i16(i16 %0)
-  store i16 %1, i16* %b, align 2
-  store i16 234, i16* %h, align 2
-  %2 = load i16, i16* %h, align 2
+  store i16 %1, ptr %b, align 2
+  store i16 234, ptr %h, align 2
+  %2 = load i16, ptr %h, align 2
   %3 = call i16 @llvm.bswap.i16(i16 %2)
-  store i16 %3, i16* %i, align 2
-  store i32 566, i32* %c, align 4
-  %4 = load i32, i32* %c, align 4
+  store i16 %3, ptr %i, align 2
+  store i32 566, ptr %c, align 4
+  %4 = load i32, ptr %c, align 4
   %5 = call i32 @llvm.bswap.i32(i32 %4)
-  store i32 %5, i32* %d, align 4
-  store i64 12587, i64* %e, align 8
-  %6 = load i64, i64* %e, align 8
+  store i32 %5, ptr %d, align 4
+  store i64 12587, ptr %e, align 8
+  %6 = load i64, ptr %e, align 8
   %7 = call i64 @llvm.bswap.i64(i64 %6)
-  store i64 %7, i64* %f, align 8
+  store i64 %7, ptr %f, align 8
   ret i32 0
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ctpop.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ctpop.ll
index 21598d712f5c3..98bc6e0f79e04 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ctpop.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ctpop.ll
@@ -16,15 +16,15 @@
 define dso_local spir_kernel void @test(i8 %x8, i16 %x16, i32 %x32, i64 %x64, <2 x i32> %x2i32) local_unnamed_addr {
 entry:
   %0 = tail call i8 @llvm.ctpop.i8(i8 %x8)
-  store i8 %0, i8 addrspace(1)* @g1, align 4
+  store i8 %0, ptr addrspace(1) @g1, align 4
   %1 = tail call i16 @llvm.ctpop.i16(i16 %x16)
-  store i16 %1, i16 addrspace(1)* @g2, align 4
+  store i16 %1, ptr addrspace(1) @g2, align 4
   %2 = tail call i32 @llvm.ctpop.i32(i32 %x32)
-  store i32 %2, i32 addrspace(1)* @g3, align 4
+  store i32 %2, ptr addrspace(1) @g3, align 4
   %3 = tail call i64 @llvm.ctpop.i64(i64 %x64)
-  store i64 %3, i64 addrspace(1)* @g4, align 8
+  store i64 %3, ptr addrspace(1) @g4, align 8
   %4 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %x2i32)
-  store <2 x i32> %4, <2 x i32> addrspace(1)* @g5, align 4
+  store <2 x i32> %4, ptr addrspace(1) @g5, align 4
 
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/expect.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/expect.ll
index ec40c263be411..5d45655976ac1 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/expect.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/expect.ll
@@ -17,63 +17,63 @@
 define spir_kernel void @_ZTSZ4mainE15kernel_function() {
 entry:
   %0 = alloca %"class._ZTSZ4mainE3$_0.anon", align 1
-  %1 = bitcast %"class._ZTSZ4mainE3$_0.anon"* %0 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* %1)
-  %2 = addrspacecast %"class._ZTSZ4mainE3$_0.anon"* %0 to %"class._ZTSZ4mainE3$_0.anon" addrspace(4)*
-  call spir_func void @"_ZZ4mainENK3$_0clEv"(%"class._ZTSZ4mainE3$_0.anon" addrspace(4)* %2)
-  %3 = bitcast %"class._ZTSZ4mainE3$_0.anon"* %0 to i8*
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* %3)
+  %1 = bitcast ptr %0 to ptr
+  call void @llvm.lifetime.start.p0(i64 1, ptr %1)
+  %2 = addrspacecast ptr %0 to ptr addrspace(4)
+  call spir_func void @"_ZZ4mainENK3$_0clEv"(ptr addrspace(4) %2)
+  %3 = bitcast ptr %0 to ptr
+  call void @llvm.lifetime.end.p0(i64 1, ptr %3)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-define internal spir_func void @"_ZZ4mainENK3$_0clEv"(%"class._ZTSZ4mainE3$_0.anon" addrspace(4)* %this) align 2 {
+define internal spir_func void @"_ZZ4mainENK3$_0clEv"(ptr addrspace(4) %this) align 2 {
 entry:
-  %this.addr = alloca %"class._ZTSZ4mainE3$_0.anon" addrspace(4)*, align 8
+  %this.addr = alloca ptr addrspace(4), align 8
   %a = alloca i32, align 4
   %b = alloca i32, align 4
-  store %"class._ZTSZ4mainE3$_0.anon" addrspace(4)* %this, %"class._ZTSZ4mainE3$_0.anon" addrspace(4)** %this.addr, align 8
-  %this1 = load %"class._ZTSZ4mainE3$_0.anon" addrspace(4)*, %"class._ZTSZ4mainE3$_0.anon" addrspace(4)** %this.addr, align 8
-  %0 = bitcast i32* %a to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
+  store ptr addrspace(4) %this, ptr %this.addr, align 8
+  %this1 = load ptr addrspace(4), ptr %this.addr, align 8
+  %0 = bitcast ptr %a to ptr
+  call void @llvm.lifetime.start.p0(i64 4, ptr %0)
   %call = call spir_func i32 @_Z12expect_consti(i32 1)
-  store i32 %call, i32* %a, align 4
-  %1 = bitcast i32* %b to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %1)
+  store i32 %call, ptr %a, align 4
+  %1 = bitcast ptr %b to ptr
+  call void @llvm.lifetime.start.p0(i64 4, ptr %1)
   %call2 = call spir_func i32 @_Z10expect_funi(i32 2)
-  store i32 %call2, i32* %b, align 4
-  %2 = bitcast i32* %b to i8*
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %2)
-  %3 = bitcast i32* %a to i8*
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %3)
+  store i32 %call2, ptr %b, align 4
+  %2 = bitcast ptr %b to ptr
+  call void @llvm.lifetime.end.p0(i64 4, ptr %2)
+  %3 = bitcast ptr %a to ptr
+  call void @llvm.lifetime.end.p0(i64 4, ptr %3)
   ret void
 }
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 define spir_func i32 @_Z12expect_consti(i32 %x) {
 entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  %0 = load i32, i32* %x.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
   %conv = sext i32 %0 to i64
   %expval = call i64 @llvm.expect.i64(i64 %conv, i64 1)
   %tobool = icmp ne i64 %expval, 0
   br i1 %tobool, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  store i32 0, i32* %retval, align 4
+  store i32 0, ptr %retval, align 4
   br label %return
 
 if.end:                                           ; preds = %entry
-  %1 = load i32, i32* %x.addr, align 4
-  store i32 %1, i32* %retval, align 4
+  %1 = load i32, ptr %x.addr, align 4
+  store i32 %1, ptr %retval, align 4
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %2 = load i32, i32* %retval, align 4
+  %2 = load i32, ptr %retval, align 4
   ret i32 %2
 }
 
@@ -81,8 +81,8 @@ define spir_func i32 @_Z10expect_funi(i32 %x) {
 entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  %0 = load i32, i32* %x.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
   %conv = sext i32 %0 to i64
   %call = call spir_func i32 @_Z3foov()
   %conv1 = sext i32 %call to i64
@@ -91,16 +91,16 @@ entry:
   br i1 %tobool, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  store i32 0, i32* %retval, align 4
+  store i32 0, ptr %retval, align 4
   br label %return
 
 if.end:                                           ; preds = %entry
-  %1 = load i32, i32* %x.addr, align 4
-  store i32 %1, i32* %retval, align 4
+  %1 = load i32, ptr %x.addr, align 4
+  store i32 %1, ptr %retval, align 4
   br label %return
 
 return:                                           ; preds = %if.end, %if.then
-  %2 = load i32, i32* %retval, align 4
+  %2 = load i32, ptr %retval, align 4
   ret i32 %2
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fp-to-int-intrinsics.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fp-to-int-intrinsics.ll
index 66c744f780ddc..fbb67da19223b 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fp-to-int-intrinsics.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fp-to-int-intrinsics.ll
@@ -24,7 +24,7 @@ define spir_kernel void @testfunction_float_to_signed_i8(float %input) {
 entry:
    %ptr = alloca i8
    %signed_int = call i8 @llvm.fptosi.sat.i8.f32(float %input)
-   store i8 %signed_int, i8* %ptr
+   store i8 %signed_int, ptr %ptr
    ret void
 
 }
@@ -36,7 +36,7 @@ define spir_kernel void @testfunction_float_to_signed_i16(float %input) {
 entry:
    %ptr = alloca i16
    %signed_int = call i16 @llvm.fptosi.sat.i16.f32(float %input)
-   store i16 %signed_int, i16* %ptr
+   store i16 %signed_int, ptr %ptr
    ret void
 
 }
@@ -47,7 +47,7 @@ define spir_kernel void @testfunction_float_to_signed_i32(float %input) {
 entry:
    %ptr = alloca i32
    %signed_int = call i32 @llvm.fptosi.sat.i32.f32(float %input)
-   store i32 %signed_int, i32* %ptr
+   store i32 %signed_int, ptr %ptr
    ret void
 
 }
@@ -59,7 +59,7 @@ define spir_kernel void @testfunction_float_to_signed_i64(float %input) {
 entry:
    %ptr = alloca i64
    %signed_int = call i64 @llvm.fptosi.sat.i64.f32(float %input)
-   store i64 %signed_int, i64* %ptr
+   store i64 %signed_int, ptr %ptr
    ret void
 }
 declare i64 @llvm.fptosi.sat.i64.f32(float)
@@ -70,7 +70,7 @@ define spir_kernel void @testfunction_double_to_signed_i8(double %input) {
 entry:
    %ptr = alloca i8
    %signed_int = call i8 @llvm.fptosi.sat.i8.f64(double %input)
-   store i8 %signed_int, i8* %ptr
+   store i8 %signed_int, ptr %ptr
    ret void
 }
 declare i8 @llvm.fptosi.sat.i8.f64(double)
@@ -81,7 +81,7 @@ define spir_kernel void @testfunction_double_to_signed_i16(double %input) {
 entry:
    %ptr = alloca i16
    %signed_int = call i16 @llvm.fptosi.sat.i16.f64(double %input)
-   store i16 %signed_int, i16* %ptr
+   store i16 %signed_int, ptr %ptr
    ret void
 }
 declare i16 @llvm.fptosi.sat.i16.f64(double)
@@ -92,7 +92,7 @@ define spir_kernel void @testfunction_double_to_signed_i32(double %input) {
 entry:
    %ptr = alloca i32
    %signed_int = call i32 @llvm.fptosi.sat.i32.f64(double %input)
-   store i32 %signed_int, i32* %ptr
+   store i32 %signed_int, ptr %ptr
    ret void
 }
 declare i32 @llvm.fptosi.sat.i32.f64(double)
@@ -103,7 +103,7 @@ define spir_kernel void @testfunction_double_to_signed_i64(double %input) {
 entry:
    %ptr = alloca i64
    %signed_int = call i64 @llvm.fptosi.sat.i64.f64(double %input)
-   store i64 %signed_int, i64* %ptr
+   store i64 %signed_int, ptr %ptr
    ret void
 }
 declare i64 @llvm.fptosi.sat.i64.f64(double)
@@ -113,7 +113,7 @@ define spir_kernel void @testfunction_float_to_unsigned_i8(float %input) {
 entry:
    %ptr = alloca i8
    %unsigned_int = call i8 @llvm.fptoui.sat.i8.f32(float %input)
-   store i8 %unsigned_int, i8* %ptr
+   store i8 %unsigned_int, ptr %ptr
    ret void
 }
 declare i8 @llvm.fptoui.sat.i8.f32(float)
@@ -124,7 +124,7 @@ define spir_kernel void @testfunction_float_to_unsigned_i16(float %input) {
 entry:
    %ptr = alloca i16
    %unsigned_int = call i16 @llvm.fptoui.sat.i16.f32(float %input)
-   store i16 %unsigned_int, i16* %ptr
+   store i16 %unsigned_int, ptr %ptr
    ret void
 }
 declare i16 @llvm.fptoui.sat.i16.f32(float)
@@ -135,7 +135,7 @@ define spir_kernel void @testfunction_float_to_unsigned_i32(float %input) {
 entry:
    %ptr = alloca i32
    %unsigned_int = call i32 @llvm.fptoui.sat.i32.f32(float %input)
-   store i32 %unsigned_int, i32* %ptr
+   store i32 %unsigned_int, ptr %ptr
    ret void
 }
 declare i32 @llvm.fptoui.sat.i32.f32(float)
@@ -146,7 +146,7 @@ define spir_kernel void @testfunction_float_to_unsigned_i64(float %input) {
 entry:
    %ptr = alloca i64
    %unsigned_int = call i64 @llvm.fptoui.sat.i64.f32(float %input)
-   store i64 %unsigned_int, i64* %ptr
+   store i64 %unsigned_int, ptr %ptr
    ret void
 }
 declare i64 @llvm.fptoui.sat.i64.f32(float)
@@ -157,7 +157,7 @@ define spir_kernel void @testfunction_double_to_unsigned_i8(double %input) {
 entry:
    %ptr = alloca i8
    %unsigned_int = call i8 @llvm.fptoui.sat.i8.f64(double %input)
-   store i8 %unsigned_int, i8* %ptr
+   store i8 %unsigned_int, ptr %ptr
    ret void
 }
 declare i8 @llvm.fptoui.sat.i8.f64(double)
@@ -168,7 +168,7 @@ define spir_kernel void @testfunction_double_to_unsigned_i16(double %input) {
 entry:
    %ptr = alloca i16
    %unsigned_int = call i16 @llvm.fptoui.sat.i16.f64(double %input)
-   store i16 %unsigned_int, i16* %ptr
+   store i16 %unsigned_int, ptr %ptr
    ret void
 }
 declare i16 @llvm.fptoui.sat.i16.f64(double)
@@ -179,7 +179,7 @@ define spir_kernel void @testfunction_double_to_unsigned_i32(double %input) {
 entry:
    %ptr = alloca i32
    %unsigned_int = call i32 @llvm.fptoui.sat.i32.f64(double %input)
-   store i32 %unsigned_int, i32* %ptr
+   store i32 %unsigned_int, ptr %ptr
    ret void
 }
 declare i32 @llvm.fptoui.sat.i32.f64(double)
@@ -190,7 +190,7 @@ define spir_kernel void @testfunction_double_to_unsigned_i64(double %input) {
 entry:
    %ptr = alloca i64
    %unsigned_int = call i64 @llvm.fptoui.sat.i64.f64(double %input)
-   store i64 %unsigned_int, i64* %ptr
+   store i64 %unsigned_int, ptr %ptr
    ret void
 }
 declare i64 @llvm.fptoui.sat.i64.f64(double)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/invariant.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/invariant.ll
index bb6225f8ad4a6..d0587872ebb1d 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/invariant.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/invariant.ll
@@ -6,14 +6,14 @@
 
 @WGSharedVar = internal addrspace(3) constant i64 0, align 8
 
-declare {}* @llvm.invariant.start.p3i8(i64 immarg, i8 addrspace(3)* nocapture)
+declare ptr @llvm.invariant.start.p3(i64 immarg, ptr addrspace(3) nocapture)
 
-declare void @llvm.invariant.end.p3i8({}*, i64 immarg, i8 addrspace(3)* nocapture)
+declare void @llvm.invariant.end.p3(ptr, i64 immarg, ptr addrspace(3) nocapture)
 
 define linkonce_odr dso_local spir_func void @func() {
-  store i64 2, i64 addrspace(3)* @WGSharedVar
-  %1 = bitcast i64 addrspace(3)* @WGSharedVar to i8 addrspace(3)*
-  %2 = call {}* @llvm.invariant.start.p3i8(i64 8, i8 addrspace(3)* %1)
-  call void @llvm.invariant.end.p3i8({}* %2, i64 8, i8 addrspace(3)* %1)
+  store i64 2, ptr addrspace(3) @WGSharedVar
+  %1 = bitcast ptr addrspace(3) @WGSharedVar to ptr addrspace(3)
+  %2 = call ptr @llvm.invariant.start.p3(i64 8, ptr addrspace(3) %1)
+  call void @llvm.invariant.end.p3(ptr %2, i64 8, ptr addrspace(3) %1)
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memcpy.align.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memcpy.align.ll
index 66a12b179dd14..772fa154c0fc5 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memcpy.align.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memcpy.align.ll
@@ -7,48 +7,48 @@
 @__const.foo.b = private unnamed_addr addrspace(2) constant %struct.B { [2 x i32] [i32 1, i32 2] }, align 4
 @__const.bar.a = private unnamed_addr addrspace(2) constant %struct.A { i64 0, %struct.B { [2 x i32] [i32 1, i32 2] } }, align 8
 
-define spir_func void @foo(%struct.A* noalias sret(%struct.A) %agg.result) {
+define spir_func void @foo(ptr noalias sret(%struct.A) %agg.result) {
 entry:
   %b = alloca %struct.B, align 4
-  %0 = bitcast %struct.B* %b to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* %0)
-  %1 = bitcast %struct.B* %b to i8*
-  call void @llvm.memcpy.p0i8.p2i8.i32(i8* align 4 %1, i8 addrspace(2)* align 4 bitcast (%struct.B addrspace(2)* @__const.foo.b to i8 addrspace(2)*), i32 8, i1 false)
+  %0 = bitcast ptr %b to ptr
+  call void @llvm.lifetime.start.p0(i64 8, ptr %0)
+  %1 = bitcast ptr %b to ptr
+  call void @llvm.memcpy.p0.p2.i32(ptr align 4 %1, ptr addrspace(2) align 4 @__const.foo.b, i32 8, i1 false)
 ; CHECK: OpCopyMemorySized %[[#]] %[[#]] %[[#]] Aligned 4
-  %b1 = getelementptr inbounds %struct.A, %struct.A* %agg.result, i32 0, i32 1
-  %2 = bitcast %struct.B* %b1 to i8*
-  %3 = bitcast %struct.B* %b to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %2, i8* align 4 %3, i32 8, i1 false)
+  %b1 = getelementptr inbounds %struct.A, ptr %agg.result, i32 0, i32 1
+  %2 = bitcast ptr %b1 to ptr
+  %3 = bitcast ptr %b to ptr
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %2, ptr align 4 %3, i32 8, i1 false)
 ; CHECK: %[[#PTR1:]] = OpInBoundsPtrAccessChain %[[#]] %[[#]] %[[#]] %[[#]]
 ; CHECK: OpCopyMemorySized %[[#PTR1]] %[[#]] %[[#]] Aligned 8
-  %4 = bitcast %struct.B* %b to i8*
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* %4)
+  %4 = bitcast ptr %b to ptr
+  call void @llvm.lifetime.end.p0(i64 8, ptr %4)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* captures(none))
+declare void @llvm.lifetime.start.p0(i64, ptr captures(none))
 
-declare void @llvm.memcpy.p0i8.p2i8.i32(i8* captures(none) writeonly, i8 addrspace(2)* captures(none) readonly, i32, i1)
+declare void @llvm.memcpy.p0.p2.i32(ptr captures(none) writeonly, ptr addrspace(2) captures(none) readonly, i32, i1)
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* captures(none) writeonly, i8* captures(none) readonly, i32, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr captures(none) writeonly, ptr captures(none) readonly, i32, i1)
 
-declare void @llvm.lifetime.end.p0i8(i64, i8* captures(none))
+declare void @llvm.lifetime.end.p0(i64, ptr captures(none))
 
-define spir_func void @bar(%struct.B* noalias sret(%struct.B) %agg.result) {
+define spir_func void @bar(ptr noalias sret(%struct.B) %agg.result) {
 entry:
   %a = alloca %struct.A, align 8
-  %0 = bitcast %struct.A* %a to i8*
-  call void @llvm.lifetime.start.p0i8(i64 16, i8* %0)
-  %1 = bitcast %struct.A* %a to i8*
-  call void @llvm.memcpy.p0i8.p2i8.i32(i8* align 8 %1, i8 addrspace(2)* align 8 bitcast (%struct.A addrspace(2)* @__const.bar.a to i8 addrspace(2)*), i32 16, i1 false)
+  %0 = bitcast ptr %a to ptr
+  call void @llvm.lifetime.start.p0(i64 16, ptr %0)
+  %1 = bitcast ptr %a to ptr
+  call void @llvm.memcpy.p0.p2.i32(ptr align 8 %1, ptr addrspace(2) align 8 @__const.bar.a, i32 16, i1 false)
 ; CHECK: OpCopyMemorySized %[[#]] %[[#]] %[[#]] Aligned 8
-  %b = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 1
-  %2 = bitcast %struct.B* %agg.result to i8*
-  %3 = bitcast %struct.B* %b to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %2, i8* align 8 %3, i32 8, i1 false)
+  %b = getelementptr inbounds %struct.A, ptr %a, i32 0, i32 1
+  %2 = bitcast ptr %agg.result to ptr
+  %3 = bitcast ptr %b to ptr
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %2, ptr align 8 %3, i32 8, i1 false)
 ; CHECK: %[[#PTR2:]] = OpInBoundsPtrAccessChain %[[#]] %[[#]] %[[#]] %[[#]]
 ; CHECK: OpCopyMemorySized %[[#]] %[[#PTR2]] %[[#]] Aligned 4
-  %4 = bitcast %struct.A* %a to i8*
-  call void @llvm.lifetime.end.p0i8(i64 16, i8* %4)
+  %4 = bitcast ptr %a to ptr
+  call void @llvm.lifetime.end.p0(i64 16, ptr %4)
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll
index 51b76640cc056..986cab7516245 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll
@@ -38,49 +38,49 @@
 @InvocIndex = external local_unnamed_addr addrspace(1) constant i64, align 8
 @"func_object1" = internal addrspace(3) global %class.kfunc zeroinitializer, align 8
 
-define spir_kernel void @test_full_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(1)* captures(none) %out) {
-  %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)*
-  %2 = bitcast %struct.SomeStruct addrspace(1)* %out to i8 addrspace(1)*
-  call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %2, i8 addrspace(1)* align 64 %1, i32 64, i1 false)
+define spir_kernel void @test_full_move(ptr addrspace(1) captures(none) readonly %in, ptr addrspace(1) captures(none) %out) {
+  %1 = bitcast ptr addrspace(1) %in to ptr addrspace(1)
+  %2 = bitcast ptr addrspace(1) %out to ptr addrspace(1)
+  call void @llvm.memmove.p1.p1.i32(ptr addrspace(1) align 64 %2, ptr addrspace(1) align 64 %1, i32 64, i1 false)
   ret void
 }
 
-define spir_kernel void @test_partial_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(4)* captures(none) %out) {
-  %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)*
-  %2 = bitcast %struct.SomeStruct addrspace(4)* %out to i8 addrspace(4)*
-  %3 = addrspacecast i8 addrspace(4)* %2 to i8 addrspace(1)*
-  call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %3, i8 addrspace(1)* align 64 %1, i32 36, i1 false)
+define spir_kernel void @test_partial_move(ptr addrspace(1) captures(none) readonly %in, ptr addrspace(4) captures(none) %out) {
+  %1 = bitcast ptr addrspace(1) %in to ptr addrspace(1)
+  %2 = bitcast ptr addrspace(4) %out to ptr addrspace(4)
+  %3 = addrspacecast ptr addrspace(4) %2 to ptr addrspace(1)
+  call void @llvm.memmove.p1.p1.i32(ptr addrspace(1) align 64 %3, ptr addrspace(1) align 64 %1, i32 36, i1 false)
   ret void
 }
 
-define spir_kernel void @test_array(i8 addrspace(1)* %in, i8 addrspace(1)* %out) {
-  call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i32 30, i1 false)
+define spir_kernel void @test_array(ptr addrspace(1) %in, ptr addrspace(1) %out) {
+  call void @llvm.memmove.p1.p1.i32(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 30, i1 false)
   ret void
 }
 
 define weak_odr dso_local spir_kernel void @test_phi() local_unnamed_addr {
 entry:
   %0 = alloca i32, align 8
-  %1 = addrspacecast i32* %0 to i32 addrspace(4)*
-  %2 = load i64, i64 addrspace(1)* @InvocIndex, align 8
+  %1 = addrspacecast ptr %0 to ptr addrspace(4)
+  %2 = load i64, ptr addrspace(1) @InvocIndex, align 8
   %cmp = icmp eq i64 %2, 0
   br i1 %cmp, label %leader, label %entry.merge_crit_edge
 
 entry.merge_crit_edge:                            ; preds = %entry
-  %3 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)*
+  %3 = bitcast ptr addrspace(4) %1 to ptr addrspace(4)
   br label %merge
 
 leader:                                           ; preds = %entry
-  %4 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)*
+  %4 = bitcast ptr addrspace(4) %1 to ptr addrspace(4)
   br label %merge
 
 merge:                                            ; preds = %entry.merge_crit_edge, %leader
-  %phi = phi i8 addrspace(4)* [ %3, %entry.merge_crit_edge ], [ %4, %leader ]
-  %5 = addrspacecast i8 addrspace(3)* bitcast (%class.kfunc addrspace(3)* @"func_object1" to i8 addrspace(3)*) to i8 addrspace(4)*
-  call void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* align 8 dereferenceable(32) %5, i8 addrspace(4)* align 8 dereferenceable(32) %phi, i64 32, i1 false)
+  %phi = phi ptr addrspace(4) [ %3, %entry.merge_crit_edge ], [ %4, %leader ]
+  %5 = addrspacecast ptr addrspace(3) @"func_object1" to ptr addrspace(4)
+  call void @llvm.memmove.p4.p4.i64(ptr addrspace(4) align 8 dereferenceable(32) %5, ptr addrspace(4) align 8 dereferenceable(32) %phi, i64 32, i1 false)
   ret void
 }
 
-declare void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* captures(none) writeonly, i8 addrspace(4)* captures(none) readonly, i64, i1 immarg)
+declare void @llvm.memmove.p4.p4.i64(ptr addrspace(4) captures(none) writeonly, ptr addrspace(4) captures(none) readonly, i64, i1 immarg)
 
-declare void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* captures(none), i8 addrspace(1)* captures(none) readonly, i32, i1)
+declare void @llvm.memmove.p1.p1.i32(ptr addrspace(1) captures(none), ptr addrspace(1) captures(none) readonly, i32, i1)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memset.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memset.ll
index d5e70ae9e7aa8..2d769ff6ab59c 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memset.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memset.ll
@@ -57,34 +57,34 @@
 
 %struct.S1 = type { i32, i32, i32 }
 
-define spir_func void @_Z5foo11v(%struct.S1 addrspace(4)* noalias nocapture sret(%struct.S1 addrspace(4)*) %agg.result, i32 %s1, i64 %s2, i8 %v) {
+define spir_func void @_Z5foo11v(ptr addrspace(4) noalias nocapture sret(ptr addrspace(4)) %agg.result, i32 %s1, i64 %s2, i8 %v) {
   %x = alloca [4 x i8]
-  %x.bc = bitcast [4 x i8]* %x to i8*
-  %a = bitcast %struct.S1 addrspace(4)* %agg.result to i8 addrspace(4)*
-  tail call void @llvm.memset.p4i8.i32(i8 addrspace(4)* align 4 %a, i8 0, i32 12, i1 false)
-  tail call void @llvm.memset.p0i8.i32(i8* align 4 %x.bc, i8 21, i32 4, i1 false)
+  %x.bc = bitcast ptr %x to ptr
+  %a = bitcast ptr addrspace(4) %agg.result to ptr addrspace(4)
+  tail call void @llvm.memset.p4.i32(ptr addrspace(4) align 4 %a, i8 0, i32 12, i1 false)
+  tail call void @llvm.memset.p0.i32(ptr align 4 %x.bc, i8 21, i32 4, i1 false)
 
   ;; non-const value
-  tail call void @llvm.memset.p0i8.i32(i8* align 4 %x.bc, i8 %v, i32 3, i1 false)
+  tail call void @llvm.memset.p0.i32(ptr align 4 %x.bc, i8 %v, i32 3, i1 false)
 
   ;; non-const value and size
-  tail call void @llvm.memset.p0i8.i32(i8*  align 4 %x.bc, i8 %v, i32 %s1, i1 false)
+  tail call void @llvm.memset.p0.i32(ptr align 4 %x.bc, i8 %v, i32 %s1, i1 false)
 
   ;; Address spaces, non-const value and size
-  %b = addrspacecast i8 addrspace(4)* %a to i8 addrspace(3)*
-  tail call void @llvm.memset.p3i8.i32(i8 addrspace(3)* align 4 %b, i8 %v, i32 %s1, i1 false)
-  %c = addrspacecast i8 addrspace(4)* %a to i8 addrspace(1)*
-  tail call void @llvm.memset.p1i8.i64(i8 addrspace(1)* align 4 %c, i8 %v, i64 %s2, i1 false)
+  %b = addrspacecast ptr addrspace(4) %a to ptr addrspace(3)
+  tail call void @llvm.memset.p3.i32(ptr addrspace(3) align 4 %b, i8 %v, i32 %s1, i1 false)
+  %c = addrspacecast ptr addrspace(4) %a to ptr addrspace(1)
+  tail call void @llvm.memset.p1.i64(ptr addrspace(1) align 4 %c, i8 %v, i64 %s2, i1 false)
 
   ;; Volatile
-  tail call void @llvm.memset.p1i8.i64(i8 addrspace(1)* align 4 %c, i8 %v, i64 %s2, i1 true)
+  tail call void @llvm.memset.p1.i64(ptr addrspace(1) align 4 %c, i8 %v, i64 %s2, i1 true)
   ret void
 }
 
-declare void @llvm.memset.p4i8.i32(i8 addrspace(4)* nocapture, i8, i32, i1)
+declare void @llvm.memset.p4.i32(ptr addrspace(4) nocapture, i8, i32, i1)
 
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1)
 
-declare void @llvm.memset.p3i8.i32(i8 addrspace(3)*, i8, i32, i1)
+declare void @llvm.memset.p3.i32(ptr addrspace(3), i8, i32, i1)
 
-declare void @llvm.memset.p1i8.i64(i8 addrspace(1)*, i8, i64, i1)
+declare void @llvm.memset.p1.i64(ptr addrspace(1), i8, i64, i1)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/sqrt.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/sqrt.ll
index 7ad2e2b5b2ec2..8222d4f62d012 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/sqrt.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/sqrt.ll
@@ -9,14 +9,14 @@
 ; CHECK-DAG: %[[#Double4Arg:]] = OpConstantComposite %[[#Double4]]
 
 ;; We need to store sqrt results, otherwise isel does not emit sqrts as dead insts.
-define spir_func void @test_sqrt(float* %x, double* %y, <4 x double>* %z) {
+define spir_func void @test_sqrt(ptr %x, ptr %y, ptr %z) {
 entry:
   %0 = call float @llvm.sqrt.f32(float 0x40091EB860000000)
-  store float %0, float* %x
+  store float %0, ptr %x
   %1 = call double @llvm.sqrt.f64(double 2.710000e+00)
-  store double %1, double* %y
+  store double %1, ptr %y
   %2 = call <4 x double> @llvm.sqrt.v4f64(<4 x double> <double 5.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01>)
-  store <4 x double> %2, <4 x double>* %z
+  store <4 x double> %2, ptr %z
 ; CHECK: %[[#]] = OpExtInst %[[#Float]] %[[#ExtInstSetId]] sqrt %[[#FloatArg]]
 ; CHECK: %[[#]] = OpExtInst %[[#Double]] %[[#ExtInstSetId]] sqrt %[[#DoubleArg]]
 ; CHECK: %[[#]] = OpExtInst %[[#Double4]] %[[#ExtInstSetId]] sqrt %[[#Double4Arg]]
diff --git a/llvm/test/CodeGen/SPIRV/lshr-constexpr.ll b/llvm/test/CodeGen/SPIRV/lshr-constexpr.ll
index f2f97f56263f9..04089a593d8ea 100644
--- a/llvm/test/CodeGen/SPIRV/lshr-constexpr.ll
+++ b/llvm/test/CodeGen/SPIRV/lshr-constexpr.ll
@@ -14,9 +14,9 @@
 ; CHECK:     %[[#bitcast_res:]] = OpBitcast %[[#type_int64]] %[[#vec_const]]
 ; CHECK:     %[[#shift_res:]] = OpShiftRightLogical %[[#type_int64]] %[[#bitcast_res]] %[[#const32]]
 
-define void @foo(i64* %arg) {
+define void @foo(ptr %arg) {
 entry:
   %0 = lshr i64 bitcast (<2 x i32> <i32 1, i32 1> to i64), 32
-  store i64 %0, i64* %arg
+  store i64 %0, ptr %arg
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/multi_md.ll b/llvm/test/CodeGen/SPIRV/multi_md.ll
index 6d8af7defd752..5a135777ff814 100644
--- a/llvm/test/CodeGen/SPIRV/multi_md.ll
+++ b/llvm/test/CodeGen/SPIRV/multi_md.ll
@@ -11,27 +11,27 @@ define spir_kernel void @__OpenCL_writer_kernel(i8 zeroext %c, i32 %i) {
 entry:
   %c.addr = alloca i8, align 1
   %i.addr = alloca i32, align 4
-  store i8 %c, i8* %c.addr, align 1
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i8, i8* %c.addr, align 1
-  store i8 %0, i8 addrspace(1)* getelementptr inbounds (%struct.my_struct_t, %struct.my_struct_t addrspace(1)* @var, i32 0, i32 0), align 1
-  %1 = load i32, i32* %i.addr, align 4
-  store i32 %1, i32 addrspace(1)* getelementptr inbounds (%struct.my_struct_t, %struct.my_struct_t addrspace(1)* @var, i32 0, i32 1), align 4
+  store i8 %c, ptr %c.addr, align 1
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i8, ptr %c.addr, align 1
+  store i8 %0, ptr addrspace(1) @var, align 1
+  %1 = load i32, ptr %i.addr, align 4
+  store i32 %1, ptr addrspace(1) getelementptr inbounds (%struct.my_struct_t, ptr addrspace(1) @var, i32 0, i32 1), align 4
   ret void
 }
 
-define spir_kernel void @__OpenCL_reader_kernel(i8 addrspace(1)* %C, i32 addrspace(1)* %I) {
+define spir_kernel void @__OpenCL_reader_kernel(ptr addrspace(1) %C, ptr addrspace(1) %I) {
 entry:
-  %C.addr = alloca i8 addrspace(1)*, align 8
-  %I.addr = alloca i32 addrspace(1)*, align 8
-  store i8 addrspace(1)* %C, i8 addrspace(1)** %C.addr, align 8
-  store i32 addrspace(1)* %I, i32 addrspace(1)** %I.addr, align 8
-  %0 = load i8, i8 addrspace(1)* getelementptr inbounds (%struct.my_struct_t, %struct.my_struct_t addrspace(1)* @var, i32 0, i32 0), align 1
-  %1 = load i8 addrspace(1)*, i8 addrspace(1)** %C.addr, align 8
-  store i8 %0, i8 addrspace(1)* %1, align 1
-  %2 = load i32, i32 addrspace(1)* getelementptr inbounds (%struct.my_struct_t, %struct.my_struct_t addrspace(1)* @var, i32 0, i32 1), align 4
-  %3 = load i32 addrspace(1)*, i32 addrspace(1)** %I.addr, align 8
-  store i32 %2, i32 addrspace(1)* %3, align 4
+  %C.addr = alloca ptr addrspace(1), align 8
+  %I.addr = alloca ptr addrspace(1), align 8
+  store ptr addrspace(1) %C, ptr %C.addr, align 8
+  store ptr addrspace(1) %I, ptr %I.addr, align 8
+  %0 = load i8, ptr addrspace(1) @var, align 1
+  %1 = load ptr addrspace(1), ptr %C.addr, align 8
+  store i8 %0, ptr addrspace(1) %1, align 1
+  %2 = load i32, ptr addrspace(1) getelementptr inbounds (%struct.my_struct_t, ptr addrspace(1) @var, i32 0, i32 1), align 4
+  %3 = load ptr addrspace(1), ptr %I.addr, align 8
+  store i32 %2, ptr addrspace(1) %3, align 4
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/opencl/basic/get_global_offset.ll b/llvm/test/CodeGen/SPIRV/opencl/basic/get_global_offset.ll
index 46a7cf4249426..19c09e34c3b28 100644
--- a/llvm/test/CodeGen/SPIRV/opencl/basic/get_global_offset.ll
+++ b/llvm/test/CodeGen/SPIRV/opencl/basic/get_global_offset.ll
@@ -20,29 +20,29 @@
 ; CHECK-NOT: %[[#func2_ty]] = OpFunction
 ; CHECK-NOT: %[[#f2_decl]] = OpFunction
 
-define spir_kernel void @test(i32 addrspace(1)* %outOffsets) {
+define spir_kernel void @test(ptr addrspace(1) %outOffsets) {
 entry:
   %0 = call spir_func <3 x i64> @BuiltInGlobalOffset() #1
   %call = extractelement <3 x i64> %0, i32 0
   %conv = trunc i64 %call to i32
 ; CHECK: %[[#i1:]] = OpInBoundsPtrAccessChain %[[#i32ptr_ty]] %[[#outOffsets:]]
 ; CHECK: OpStore %[[#i1:]] %[[#]] Aligned 4
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %outOffsets, i64 0
-  store i32 %conv, i32 addrspace(1)* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %outOffsets, i64 0
+  store i32 %conv, ptr addrspace(1) %arrayidx, align 4
   %1 = call spir_func <3 x i64> @BuiltInGlobalOffset() #1
   %call1 = extractelement <3 x i64> %1, i32 1
   %conv2 = trunc i64 %call1 to i32
 ; CHECK: %[[#i2:]] = OpInBoundsPtrAccessChain %[[#i32ptr_ty]] %[[#outOffsets]]
 ; CHECK: OpStore %[[#i2:]] %[[#]] Aligned 4
-  %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %outOffsets, i64 1
-  store i32 %conv2, i32 addrspace(1)* %arrayidx3, align 4
+  %arrayidx3 = getelementptr inbounds i32, ptr addrspace(1) %outOffsets, i64 1
+  store i32 %conv2, ptr addrspace(1) %arrayidx3, align 4
   %2 = call spir_func <3 x i64> @BuiltInGlobalOffset() #1
   %call4 = extractelement <3 x i64> %2, i32 2
   %conv5 = trunc i64 %call4 to i32
 ; CHECK: %[[#i3:]] = OpInBoundsPtrAccessChain %[[#i32ptr_ty]] %[[#outOffsets]]
 ; CHECK: OpStore %[[#i3:]] %[[#]] Aligned 4
-  %arrayidx6 = getelementptr inbounds i32, i32 addrspace(1)* %outOffsets, i64 2
-  store i32 %conv5, i32 addrspace(1)* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr addrspace(1) %outOffsets, i64 2
+  store i32 %conv5, ptr addrspace(1) %arrayidx6, align 4
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/opencl/basic/progvar_prog_scope_init.ll b/llvm/test/CodeGen/SPIRV/opencl/basic/progvar_prog_scope_init.ll
index fbc83c7a1e045..61aa9e1d9466e 100644
--- a/llvm/test/CodeGen/SPIRV/opencl/basic/progvar_prog_scope_init.ll
+++ b/llvm/test/CodeGen/SPIRV/opencl/basic/progvar_prog_scope_init.ll
@@ -15,7 +15,7 @@
 @var = addrspace(1) global i8 0, align 1
 @g_var = addrspace(1) global i8 1, align 1
 @a_var = addrspace(1) global [2 x i8] c"\01\01", align 1
-@p_var = addrspace(1) global i8 addrspace(1)* getelementptr inbounds ([2 x i8], [2 x i8] addrspace(1)* @a_var, i32 0, i64 1), align 8
+@p_var = addrspace(1) global ptr addrspace(1) getelementptr inbounds ([2 x i8], ptr addrspace(1) @a_var, i32 0, i64 1), align 8
 
 define spir_func zeroext i8 @from_buf(i8 zeroext %a) {
 entry:
@@ -34,76 +34,76 @@ entry:
   ret i8 %conv
 }
 
-define spir_kernel void @writer(i8 addrspace(1)* %src, i32 %idx) {
+define spir_kernel void @writer(ptr addrspace(1) %src, i32 %idx) {
 entry:
-  %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %src, i64 0
-  %0 = load i8, i8 addrspace(1)* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr addrspace(1) %src, i64 0
+  %0 = load i8, ptr addrspace(1) %arrayidx, align 1
   %call = call spir_func zeroext i8 @from_buf(i8 zeroext %0)
   %i1trunc = trunc i8 %call to i1
   %frombool = select i1 %i1trunc, i8 1, i8 0
-  store i8 %frombool, i8 addrspace(1)* @var, align 1
-  %arrayidx1 = getelementptr inbounds i8, i8 addrspace(1)* %src, i64 1
-  %1 = load i8, i8 addrspace(1)* %arrayidx1, align 1
+  store i8 %frombool, ptr addrspace(1) @var, align 1
+  %arrayidx1 = getelementptr inbounds i8, ptr addrspace(1) %src, i64 1
+  %1 = load i8, ptr addrspace(1) %arrayidx1, align 1
   %call2 = call spir_func zeroext i8 @from_buf(i8 zeroext %1)
   %i1trunc1 = trunc i8 %call2 to i1
   %frombool3 = select i1 %i1trunc1, i8 1, i8 0
-  store i8 %frombool3, i8 addrspace(1)* @g_var, align 1
-  %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %src, i64 2
-  %2 = load i8, i8 addrspace(1)* %arrayidx4, align 1
+  store i8 %frombool3, ptr addrspace(1) @g_var, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr addrspace(1) %src, i64 2
+  %2 = load i8, ptr addrspace(1) %arrayidx4, align 1
   %call5 = call spir_func zeroext i8 @from_buf(i8 zeroext %2)
   %i1trunc2 = trunc i8 %call5 to i1
   %frombool6 = select i1 %i1trunc2, i8 1, i8 0
-  %3 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(1)* @a_var, i64 0, i64 0
-  store i8 %frombool6, i8 addrspace(1)* %3, align 1
-  %arrayidx7 = getelementptr inbounds i8, i8 addrspace(1)* %src, i64 3
-  %4 = load i8, i8 addrspace(1)* %arrayidx7, align 1
+  %3 = getelementptr inbounds [2 x i8], ptr addrspace(1) @a_var, i64 0, i64 0
+  store i8 %frombool6, ptr addrspace(1) %3, align 1
+  %arrayidx7 = getelementptr inbounds i8, ptr addrspace(1) %src, i64 3
+  %4 = load i8, ptr addrspace(1) %arrayidx7, align 1
   %call8 = call spir_func zeroext i8 @from_buf(i8 zeroext %4)
   %i1trunc3 = trunc i8 %call8 to i1
   %frombool9 = select i1 %i1trunc3, i8 1, i8 0
-  %5 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(1)* @a_var, i64 0, i64 1
-  store i8 %frombool9, i8 addrspace(1)* %5, align 1
+  %5 = getelementptr inbounds [2 x i8], ptr addrspace(1) @a_var, i64 0, i64 1
+  store i8 %frombool9, ptr addrspace(1) %5, align 1
   %idx.ext = zext i32 %idx to i64
-  %add.ptr = getelementptr inbounds i8, i8 addrspace(1)* %3, i64 %idx.ext
-  store i8 addrspace(1)* %add.ptr, i8 addrspace(1)* addrspace(1)* @p_var, align 8
+  %add.ptr = getelementptr inbounds i8, ptr addrspace(1) %3, i64 %idx.ext
+  store ptr addrspace(1) %add.ptr, ptr addrspace(1) @p_var, align 8
   ret void
 }
 
-define spir_kernel void @reader(i8 addrspace(1)* %dest, i8 zeroext %ptr_write_val) {
+define spir_kernel void @reader(ptr addrspace(1) %dest, i8 zeroext %ptr_write_val) {
 entry:
   %call = call spir_func zeroext i8 @from_buf(i8 zeroext %ptr_write_val)
   %i1trunc = trunc i8 %call to i1
-  %0 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* @p_var, align 8
+  %0 = load ptr addrspace(1), ptr addrspace(1) @p_var, align 8
   %frombool = select i1 %i1trunc, i8 1, i8 0
-  store volatile i8 %frombool, i8 addrspace(1)* %0, align 1
-  %1 = load i8, i8 addrspace(1)* @var, align 1
+  store volatile i8 %frombool, ptr addrspace(1) %0, align 1
+  %1 = load i8, ptr addrspace(1) @var, align 1
   %2 = and i8 %1, 1
   %tobool = icmp ne i8 %2, 0
   %i1promo = zext i1 %tobool to i8
   %call1 = call spir_func zeroext i8 @to_buf(i8 zeroext %i1promo)
-  %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %dest, i64 0
-  store i8 %call1, i8 addrspace(1)* %arrayidx, align 1
-  %3 = load i8, i8 addrspace(1)* @g_var, align 1
+  %arrayidx = getelementptr inbounds i8, ptr addrspace(1) %dest, i64 0
+  store i8 %call1, ptr addrspace(1) %arrayidx, align 1
+  %3 = load i8, ptr addrspace(1) @g_var, align 1
   %4 = and i8 %3, 1
   %tobool2 = icmp ne i8 %4, 0
   %i1promo1 = zext i1 %tobool2 to i8
   %call3 = call spir_func zeroext i8 @to_buf(i8 zeroext %i1promo1)
-  %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %dest, i64 1
-  store i8 %call3, i8 addrspace(1)* %arrayidx4, align 1
-  %5 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(1)* @a_var, i64 0, i64 0
-  %6 = load i8, i8 addrspace(1)* %5, align 1
+  %arrayidx4 = getelementptr inbounds i8, ptr addrspace(1) %dest, i64 1
+  store i8 %call3, ptr addrspace(1) %arrayidx4, align 1
+  %5 = getelementptr inbounds [2 x i8], ptr addrspace(1) @a_var, i64 0, i64 0
+  %6 = load i8, ptr addrspace(1) %5, align 1
   %7 = and i8 %6, 1
   %tobool5 = icmp ne i8 %7, 0
   %i1promo2 = zext i1 %tobool5 to i8
   %call6 = call spir_func zeroext i8 @to_buf(i8 zeroext %i1promo2)
-  %arrayidx7 = getelementptr inbounds i8, i8 addrspace(1)* %dest, i64 2
-  store i8 %call6, i8 addrspace(1)* %arrayidx7, align 1
-  %8 = getelementptr inbounds [2 x i8], [2 x i8] addrspace(1)* @a_var, i64 0, i64 1
-  %9 = load i8, i8 addrspace(1)* %8, align 1
+  %arrayidx7 = getelementptr inbounds i8, ptr addrspace(1) %dest, i64 2
+  store i8 %call6, ptr addrspace(1) %arrayidx7, align 1
+  %8 = getelementptr inbounds [2 x i8], ptr addrspace(1) @a_var, i64 0, i64 1
+  %9 = load i8, ptr addrspace(1) %8, align 1
   %10 = and i8 %9, 1
   %tobool8 = icmp ne i8 %10, 0
   %i1promo3 = zext i1 %tobool8 to i8
   %call9 = call spir_func zeroext i8 @to_buf(i8 zeroext %i1promo3)
-  %arrayidx10 = getelementptr inbounds i8, i8 addrspace(1)* %dest, i64 3
-  store i8 %call9, i8 addrspace(1)* %arrayidx10, align 1
+  %arrayidx10 = getelementptr inbounds i8, ptr addrspace(1) %dest, i64 3
+  store i8 %call9, ptr addrspace(1) %arrayidx10, align 1
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/opencl/basic/progvar_prog_scope_uninit.ll b/llvm/test/CodeGen/SPIRV/opencl/basic/progvar_prog_scope_uninit.ll
index fe02ba650aaa0..77b97b9090d80 100644
--- a/llvm/test/CodeGen/SPIRV/opencl/basic/progvar_prog_scope_uninit.ll
+++ b/llvm/test/CodeGen/SPIRV/opencl/basic/progvar_prog_scope_uninit.ll
@@ -10,7 +10,7 @@
 @var = addrspace(1) global <2 x i8> zeroinitializer, align 2
 @g_var = addrspace(1) global <2 x i8> zeroinitializer, align 2
 @a_var = addrspace(1) global [2 x <2 x i8>] zeroinitializer, align 2
-@p_var = addrspace(1) global <2 x i8> addrspace(1)* null, align 8
+@p_var = addrspace(1) global ptr addrspace(1) null, align 8
 
 define spir_func <2 x i8> @from_buf(<2 x i8> %a) {
 entry:
@@ -22,9 +22,9 @@ entry:
   ret <2 x i8> %a
 }
 
-define spir_kernel void @global_check(i32 addrspace(1)* %out) {
+define spir_kernel void @global_check(ptr addrspace(1) %out) {
 entry:
-  %0 = load <2 x i8>, <2 x i8> addrspace(1)* @var, align 2
+  %0 = load <2 x i8>, ptr addrspace(1) @var, align 2
   %cmp = icmp eq <2 x i8> %0, zeroinitializer
   %sext = select <2 x i1> %cmp, <2 x i8> <i8 -1, i8 -1>, <2 x i8> zeroinitializer
   %cast = icmp slt <2 x i8> %sext, zeroinitializer
@@ -37,7 +37,7 @@ entry:
   %and = and i32 %conv, %call
   %tobool1 = icmp ne i32 %and, 0
   %frombool = select i1 %tobool1, i8 1, i8 0
-  %2 = load <2 x i8>, <2 x i8> addrspace(1)* @g_var, align 2
+  %2 = load <2 x i8>, ptr addrspace(1) @g_var, align 2
   %cmp2 = icmp eq <2 x i8> %2, zeroinitializer
   %sext3 = select <2 x i1> %cmp2, <2 x i8> <i8 -1, i8 -1>, <2 x i8> zeroinitializer
   %cast2 = icmp slt <2 x i8> %sext3, zeroinitializer
@@ -50,8 +50,8 @@ entry:
   %and7 = and i32 %conv6, %call4
   %tobool8 = icmp ne i32 %and7, 0
   %frombool9 = select i1 %tobool8, i8 1, i8 0
-  %4 = getelementptr inbounds [2 x <2 x i8>], [2 x <2 x i8>] addrspace(1)* @a_var, i64 0, i64 0
-  %5 = load <2 x i8>, <2 x i8> addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds [2 x <2 x i8>], ptr addrspace(1) @a_var, i64 0, i64 0
+  %5 = load <2 x i8>, ptr addrspace(1) %4, align 2
   %cmp10 = icmp eq <2 x i8> %5, zeroinitializer
   %sext11 = select <2 x i1> %cmp10, <2 x i8> <i8 -1, i8 -1>, <2 x i8> zeroinitializer
   %cast4 = icmp slt <2 x i8> %sext11, zeroinitializer
@@ -64,8 +64,8 @@ entry:
   %and15 = and i32 %conv14, %call12
   %tobool16 = icmp ne i32 %and15, 0
   %frombool17 = select i1 %tobool16, i8 1, i8 0
-  %7 = getelementptr inbounds [2 x <2 x i8>], [2 x <2 x i8>] addrspace(1)* @a_var, i64 0, i64 1
-  %8 = load <2 x i8>, <2 x i8> addrspace(1)* %7, align 2
+  %7 = getelementptr inbounds [2 x <2 x i8>], ptr addrspace(1) @a_var, i64 0, i64 1
+  %8 = load <2 x i8>, ptr addrspace(1) %7, align 2
   %cmp18 = icmp eq <2 x i8> %8, zeroinitializer
   %sext19 = select <2 x i1> %cmp18, <2 x i8> <i8 -1, i8 -1>, <2 x i8> zeroinitializer
   %cast6 = icmp slt <2 x i8> %sext19, zeroinitializer
@@ -78,9 +78,9 @@ entry:
   %and23 = and i32 %conv22, %call20
   %tobool24 = icmp ne i32 %and23, 0
   %frombool25 = select i1 %tobool24, i8 1, i8 0
-  %10 = load <2 x i8> addrspace(1)*, <2 x i8> addrspace(1)* addrspace(1)* @p_var, align 8
-  %11 = ptrtoint <2 x i8> addrspace(1)* %10 to i64
-  %12 = ptrtoint <2 x i8> addrspace(1)* null to i64
+  %10 = load ptr addrspace(1), ptr addrspace(1) @p_var, align 8
+  %11 = ptrtoint ptr addrspace(1) %10 to i64
+  %12 = ptrtoint ptr addrspace(1) null to i64
   %cmp26 = icmp eq i64 %11, %12
   %conv27 = select i1 %cmp26, i32 1, i32 0
   %13 = and i8 %frombool25, 1
@@ -93,60 +93,60 @@ entry:
   %tobool33 = icmp ne i8 %14, 0
   %15 = select i1 %tobool33, i64 1, i64 0
   %cond = select i1 %tobool33, i32 1, i32 0
-  store i32 %cond, i32 addrspace(1)* %out, align 4
+  store i32 %cond, ptr addrspace(1) %out, align 4
   ret void
 }
 
 declare spir_func i1 @OpAll_v2i8(<2 x i8>)
 
-define spir_kernel void @writer(<2 x i8> addrspace(1)* %src, i32 %idx) {
+define spir_kernel void @writer(ptr addrspace(1) %src, i32 %idx) {
 entry:
-  %arrayidx = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %src, i64 0
-  %0 = load <2 x i8>, <2 x i8> addrspace(1)* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds <2 x i8>, ptr addrspace(1) %src, i64 0
+  %0 = load <2 x i8>, ptr addrspace(1) %arrayidx, align 2
   %call = call spir_func <2 x i8> @from_buf(<2 x i8> %0)
-  store <2 x i8> %call, <2 x i8> addrspace(1)* @var, align 2
-  %arrayidx1 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %src, i64 1
-  %1 = load <2 x i8>, <2 x i8> addrspace(1)* %arrayidx1, align 2
+  store <2 x i8> %call, ptr addrspace(1) @var, align 2
+  %arrayidx1 = getelementptr inbounds <2 x i8>, ptr addrspace(1) %src, i64 1
+  %1 = load <2 x i8>, ptr addrspace(1) %arrayidx1, align 2
   %call2 = call spir_func <2 x i8> @from_buf(<2 x i8> %1)
-  store <2 x i8> %call2, <2 x i8> addrspace(1)* @g_var, align 2
-  %arrayidx3 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %src, i64 2
-  %2 = load <2 x i8>, <2 x i8> addrspace(1)* %arrayidx3, align 2
+  store <2 x i8> %call2, ptr addrspace(1) @g_var, align 2
+  %arrayidx3 = getelementptr inbounds <2 x i8>, ptr addrspace(1) %src, i64 2
+  %2 = load <2 x i8>, ptr addrspace(1) %arrayidx3, align 2
   %call4 = call spir_func <2 x i8> @from_buf(<2 x i8> %2)
-  %3 = getelementptr inbounds [2 x <2 x i8>], [2 x <2 x i8>] addrspace(1)* @a_var, i64 0, i64 0
-  store <2 x i8> %call4, <2 x i8> addrspace(1)* %3, align 2
-  %arrayidx5 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %src, i64 3
-  %4 = load <2 x i8>, <2 x i8> addrspace(1)* %arrayidx5, align 2
+  %3 = getelementptr inbounds [2 x <2 x i8>], ptr addrspace(1) @a_var, i64 0, i64 0
+  store <2 x i8> %call4, ptr addrspace(1) %3, align 2
+  %arrayidx5 = getelementptr inbounds <2 x i8>, ptr addrspace(1) %src, i64 3
+  %4 = load <2 x i8>, ptr addrspace(1) %arrayidx5, align 2
   %call6 = call spir_func <2 x i8> @from_buf(<2 x i8> %4)
-  %5 = getelementptr inbounds [2 x <2 x i8>], [2 x <2 x i8>] addrspace(1)* @a_var, i64 0, i64 1
-  store <2 x i8> %call6, <2 x i8> addrspace(1)* %5, align 2
+  %5 = getelementptr inbounds [2 x <2 x i8>], ptr addrspace(1) @a_var, i64 0, i64 1
+  store <2 x i8> %call6, ptr addrspace(1) %5, align 2
   %idx.ext = zext i32 %idx to i64
-  %add.ptr = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %3, i64 %idx.ext
-  store <2 x i8> addrspace(1)* %add.ptr, <2 x i8> addrspace(1)* addrspace(1)* @p_var, align 8
+  %add.ptr = getelementptr inbounds <2 x i8>, ptr addrspace(1) %3, i64 %idx.ext
+  store ptr addrspace(1) %add.ptr, ptr addrspace(1) @p_var, align 8
   ret void
 }
 
-define spir_kernel void @reader(<2 x i8> addrspace(1)* %dest, <2 x i8> %ptr_write_val) {
+define spir_kernel void @reader(ptr addrspace(1) %dest, <2 x i8> %ptr_write_val) {
 entry:
   %call = call spir_func <2 x i8> @from_buf(<2 x i8> %ptr_write_val)
-  %0 = load <2 x i8> addrspace(1)*, <2 x i8> addrspace(1)* addrspace(1)* @p_var, align 8
-  store <2 x i8> %call, <2 x i8> addrspace(1)* %0, align 2
-  %1 = load <2 x i8>, <2 x i8> addrspace(1)* @var, align 2
+  %0 = load ptr addrspace(1), ptr addrspace(1) @p_var, align 8
+  store <2 x i8> %call, ptr addrspace(1) %0, align 2
+  %1 = load <2 x i8>, ptr addrspace(1) @var, align 2
   %call1 = call spir_func <2 x i8> @to_buf(<2 x i8> %1)
-  %arrayidx = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %dest, i64 0
-  store <2 x i8> %call1, <2 x i8> addrspace(1)* %arrayidx, align 2
-  %2 = load <2 x i8>, <2 x i8> addrspace(1)* @g_var, align 2
+  %arrayidx = getelementptr inbounds <2 x i8>, ptr addrspace(1) %dest, i64 0
+  store <2 x i8> %call1, ptr addrspace(1) %arrayidx, align 2
+  %2 = load <2 x i8>, ptr addrspace(1) @g_var, align 2
   %call2 = call spir_func <2 x i8> @to_buf(<2 x i8> %2)
-  %arrayidx3 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %dest, i64 1
-  store <2 x i8> %call2, <2 x i8> addrspace(1)* %arrayidx3, align 2
-  %3 = getelementptr inbounds [2 x <2 x i8>], [2 x <2 x i8>] addrspace(1)* @a_var, i64 0, i64 0
-  %4 = load <2 x i8>, <2 x i8> addrspace(1)* %3, align 2
+  %arrayidx3 = getelementptr inbounds <2 x i8>, ptr addrspace(1) %dest, i64 1
+  store <2 x i8> %call2, ptr addrspace(1) %arrayidx3, align 2
+  %3 = getelementptr inbounds [2 x <2 x i8>], ptr addrspace(1) @a_var, i64 0, i64 0
+  %4 = load <2 x i8>, ptr addrspace(1) %3, align 2
   %call4 = call spir_func <2 x i8> @to_buf(<2 x i8> %4)
-  %arrayidx5 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %dest, i64 2
-  store <2 x i8> %call4, <2 x i8> addrspace(1)* %arrayidx5, align 2
-  %5 = getelementptr inbounds [2 x <2 x i8>], [2 x <2 x i8>] addrspace(1)* @a_var, i64 0, i64 1
-  %6 = load <2 x i8>, <2 x i8> addrspace(1)* %5, align 2
+  %arrayidx5 = getelementptr inbounds <2 x i8>, ptr addrspace(1) %dest, i64 2
+  store <2 x i8> %call4, ptr addrspace(1) %arrayidx5, align 2
+  %5 = getelementptr inbounds [2 x <2 x i8>], ptr addrspace(1) @a_var, i64 0, i64 1
+  %6 = load <2 x i8>, ptr addrspace(1) %5, align 2
   %call6 = call spir_func <2 x i8> @to_buf(<2 x i8> %6)
-  %arrayidx7 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %dest, i64 3
-  store <2 x i8> %call6, <2 x i8> addrspace(1)* %arrayidx7, align 2
+  %arrayidx7 = getelementptr inbounds <2 x i8>, ptr addrspace(1) %dest, i64 3
+  store <2 x i8> %call6, ptr addrspace(1) %arrayidx7, align 2
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/opencl/device_execution/execute_block.ll b/llvm/test/CodeGen/SPIRV/opencl/device_execution/execute_block.ll
index 562f5c7b6826e..e1170cf3c3bf0 100644
--- a/llvm/test/CodeGen/SPIRV/opencl/device_execution/execute_block.ll
+++ b/llvm/test/CodeGen/SPIRV/opencl/device_execution/execute_block.ll
@@ -6,39 +6,39 @@
 ; CHECK: %[[#true:]] = OpConstantTrue %[[#bool]]
 ; CHECK: OpBranchConditional %[[#true]]
 
-%structtype = type { i32, i32, i8 addrspace(4)* }
-%structtype.0 = type <{ i32, i32, i8 addrspace(4)* }>
+%structtype = type { i32, i32, ptr addrspace(4) }
+%structtype.0 = type <{ i32, i32, ptr addrspace(4) }>
 
-@__block_literal_global = internal addrspace(1) constant %structtype { i32 16, i32 8, i8 addrspace(4)* addrspacecast (i8* null to i8 addrspace(4)*) }, align 8
-@__block_literal_global.1 = internal addrspace(1) constant %structtype { i32 16, i32 8, i8 addrspace(4)* addrspacecast (i8* null to i8 addrspace(4)*) }, align 8
-@__block_literal_global.2 = internal addrspace(1) constant %structtype { i32 16, i32 8, i8 addrspace(4)* addrspacecast (i8* null to i8 addrspace(4)*) }, align 8
+@__block_literal_global = internal addrspace(1) constant %structtype { i32 16, i32 8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)) }, align 8
+@__block_literal_global.1 = internal addrspace(1) constant %structtype { i32 16, i32 8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)) }, align 8
+@__block_literal_global.2 = internal addrspace(1) constant %structtype { i32 16, i32 8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)) }, align 8
 
-define spir_kernel void @block_typedef_mltpl_stmnt(i32 addrspace(1)* %res) {
+define spir_kernel void @block_typedef_mltpl_stmnt(ptr addrspace(1) %res) {
 entry:
   %0 = call spir_func <3 x i64> @BuiltInGlobalInvocationId()
   %call = extractelement <3 x i64> %0, i32 0
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %res, i64 %call
-  store i32 -1, i32 addrspace(1)* %arrayidx, align 4
-  %1 = bitcast %structtype addrspace(1)* @__block_literal_global to i8 addrspace(1)*
-  %2 = addrspacecast i8 addrspace(1)* %1 to i8 addrspace(4)*
-  %3 = bitcast %structtype addrspace(1)* @__block_literal_global.1 to i8 addrspace(1)*
-  %4 = addrspacecast i8 addrspace(1)* %3 to i8 addrspace(4)*
-  %5 = bitcast %structtype addrspace(1)* @__block_literal_global.2 to i8 addrspace(1)*
-  %6 = addrspacecast i8 addrspace(1)* %5 to i8 addrspace(4)*
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %res, i64 %call
+  store i32 -1, ptr addrspace(1) %arrayidx, align 4
+  %1 = bitcast ptr addrspace(1) @__block_literal_global to ptr addrspace(1)
+  %2 = addrspacecast ptr addrspace(1) %1 to ptr addrspace(4)
+  %3 = bitcast ptr addrspace(1) @__block_literal_global.1 to ptr addrspace(1)
+  %4 = addrspacecast ptr addrspace(1) %3 to ptr addrspace(4)
+  %5 = bitcast ptr addrspace(1) @__block_literal_global.2 to ptr addrspace(1)
+  %6 = addrspacecast ptr addrspace(1) %5 to ptr addrspace(4)
   br label %do.body
 
 do.body:                                          ; preds = %do.cond, %entry
   %a.0 = phi i32 [ undef, %entry ], [ %a.1, %do.cond ]
-  %call1 = call spir_func float @__block_typedef_mltpl_stmnt_block_invoke(i8 addrspace(4)* %2, float 0.000000e+00)
-  %call2 = call spir_func i32 @__block_typedef_mltpl_stmnt_block_invoke_2(i8 addrspace(4)* %4, i32 0)
+  %call1 = call spir_func float @__block_typedef_mltpl_stmnt_block_invoke(ptr addrspace(4) %2, float 0.000000e+00)
+  %call2 = call spir_func i32 @__block_typedef_mltpl_stmnt_block_invoke_2(ptr addrspace(4) %4, i32 0)
   %conv = sitofp i32 %call2 to float
   %sub = fsub float %call1, %conv
   %cmp = fcmp ogt float %sub, 0.000000e+00
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %do.body
-  %call4 = call spir_func i32 @__block_typedef_mltpl_stmnt_block_invoke_3(i8 addrspace(4)* %6, i32 1)
-  %call5 = call spir_func i32 @__block_typedef_mltpl_stmnt_block_invoke_3(i8 addrspace(4)* %6, i32 2)
+  %call4 = call spir_func i32 @__block_typedef_mltpl_stmnt_block_invoke_3(ptr addrspace(4) %6, i32 1)
+  %call5 = call spir_func i32 @__block_typedef_mltpl_stmnt_block_invoke_3(ptr addrspace(4) %6, i32 2)
   %add = add i32 %call4, %call5
   br label %cleanup
 
@@ -61,33 +61,33 @@ do.cond:                                          ; preds = %cleanup.cont
 
 do.end:                                           ; preds = %do.cond, %cleanup
   %sub7 = sub nsw i32 %a.1, 11
-  %arrayidx8 = getelementptr inbounds i32, i32 addrspace(1)* %res, i64 %call
-  store i32 %sub7, i32 addrspace(1)* %arrayidx8, align 4
+  %arrayidx8 = getelementptr inbounds i32, ptr addrspace(1) %res, i64 %call
+  store i32 %sub7, ptr addrspace(1) %arrayidx8, align 4
   ret void
 
 unreachable:                                      ; preds = %cleanup
   unreachable
 }
 
-define internal spir_func float @__block_typedef_mltpl_stmnt_block_invoke(i8 addrspace(4)* %.block_descriptor, float %bi) {
+define internal spir_func float @__block_typedef_mltpl_stmnt_block_invoke(ptr addrspace(4) %.block_descriptor, float %bi) {
 entry:
-  %block = bitcast i8 addrspace(4)* %.block_descriptor to %structtype.0 addrspace(4)*
+  %block = bitcast ptr addrspace(4) %.block_descriptor to ptr addrspace(4)
   %conv = fpext float %bi to double
   %add = fadd double %conv, 3.300000e+00
   %conv1 = fptrunc double %add to float
   ret float %conv1
 }
 
-define internal spir_func i32 @__block_typedef_mltpl_stmnt_block_invoke_2(i8 addrspace(4)* %.block_descriptor, i32 %bi) {
+define internal spir_func i32 @__block_typedef_mltpl_stmnt_block_invoke_2(ptr addrspace(4) %.block_descriptor, i32 %bi) {
 entry:
-  %block = bitcast i8 addrspace(4)* %.block_descriptor to %structtype.0 addrspace(4)*
+  %block = bitcast ptr addrspace(4) %.block_descriptor to ptr addrspace(4)
   %add = add nsw i32 %bi, 2
   ret i32 %add
 }
 
-define internal spir_func i32 @__block_typedef_mltpl_stmnt_block_invoke_3(i8 addrspace(4)* %.block_descriptor, i32 %bi) {
+define internal spir_func i32 @__block_typedef_mltpl_stmnt_block_invoke_3(ptr addrspace(4) %.block_descriptor, i32 %bi) {
 entry:
-  %block = bitcast i8 addrspace(4)* %.block_descriptor to %structtype.0 addrspace(4)*
+  %block = bitcast ptr addrspace(4) %.block_descriptor to ptr addrspace(4)
   %add = add i32 %bi, 4
   ret i32 %add
 }
diff --git a/llvm/test/CodeGen/SPIRV/opencl/image.ll b/llvm/test/CodeGen/SPIRV/opencl/image.ll
index b1150cfb2ff71..b60e4bf2db714 100644
--- a/llvm/test/CodeGen/SPIRV/opencl/image.ll
+++ b/llvm/test/CodeGen/SPIRV/opencl/image.ll
@@ -14,12 +14,12 @@ define void @foo(
   target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0) %a,
   target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 1) %b,
   target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 2) %c,
-  i32 addrspace(1)* %d
+  ptr addrspace(1) %d
 ) {
   %pixel = call <4 x i32> @_Z11read_imagei14ocl_image1d_roi(target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0) %a, i32 0)
   call void @_Z12write_imagei14ocl_image2d_woDv2_iDv4_i(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 1) %b, <2 x i32> zeroinitializer, <4 x i32> %pixel)
   %size = call i32 @_Z15get_image_width14ocl_image3d_rw(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 2) %c)
-  store i32 %size, i32 addrspace(1)* %d
+  store i32 %size, ptr addrspace(1) %d
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll b/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll
index d3b63ec9e1094..f6b286f8500b7 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/two-bitcast-or-param-users.ll
@@ -5,8 +5,8 @@
 ; CHECK-DAG: %[[#GLOBAL_PTR_INT:]] = OpTypePointer CrossWorkgroup %[[#INT]]
 
 define i32 @foo(i32 %a, ptr addrspace(1) %p) {
-  store i32 %a, i32 addrspace(1)* %p
-  %b = load i32, i32 addrspace(1)* %p
+  store i32 %a, ptr addrspace(1) %p
+  %b = load i32, ptr addrspace(1) %p
   ret i32 %b
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll b/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll
index 8c01df44563ef..1a491c45ea35d 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/two-subsequent-bitcasts.ll
@@ -4,14 +4,14 @@
 ; CHECK-DAG: %[[#float:]] = OpTypeFloat 32
 ; CHECK-DAG: %[[#pointer:]] = OpTypePointer CrossWorkgroup %[[#float]]
 
-define void @foo(float addrspace(1)* %A, i32 %B) {
+define void @foo(ptr addrspace(1) %A, i32 %B) {
   %cmp = icmp sgt i32 %B, 0
   %conv = uitofp i1 %cmp to float
 ; CHECK-DAG: %[[#utof_res:]] = OpConvertUToF %[[#float]] %[[#]]
 ; CHECK-DAG: %[[#bitcastORparam:]] = {{OpBitcast|OpFunctionParameter}}{{.*}}%[[#pointer]]{{.*}}
 ; CHECK: OpStore %[[#bitcastORparam]] %[[#utof_res]]
-  %BC1 = bitcast float addrspace(1)* %A to i32 addrspace(1)*
-  %BC2 = bitcast i32 addrspace(1)* %BC1 to float addrspace(1)*
-  store float %conv, float addrspace(1)* %BC2, align 4;
+  %BC1 = bitcast ptr addrspace(1) %A to ptr addrspace(1)
+  %BC2 = bitcast ptr addrspace(1) %BC1 to ptr addrspace(1)
+  store float %conv, ptr addrspace(1) %BC2, align 4;
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/scoped_atomicrmw.ll b/llvm/test/CodeGen/SPIRV/scoped_atomicrmw.ll
index 9863881c31331..6830bcc8623a5 100644
--- a/llvm/test/CodeGen/SPIRV/scoped_atomicrmw.ll
+++ b/llvm/test/CodeGen/SPIRV/scoped_atomicrmw.ll
@@ -24,27 +24,27 @@
 
 define dso_local spir_func void @test_singlethread_atomicrmw() local_unnamed_addr {
 entry:
-  %0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+  %0 = atomicrmw xchg ptr addrspace(1) @ui, i32 42 syncscope("singlethread") seq_cst
   ; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 syncscope("singlethread") seq_cst
+  %1 = atomicrmw xchg ptr addrspace(1) @f, float 42.000000e+00 syncscope("singlethread") seq_cst
   ; CHECK: %[[#]] = OpAtomicExchange %[[#Float:]] %[[#FPPointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#FPValue:]]
-  %2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+  %2 = atomicrmw add ptr addrspace(1) @ui, i32 42 syncscope("singlethread") seq_cst
   ; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+  %3 = atomicrmw sub ptr addrspace(1) @ui, i32 42 syncscope("singlethread") seq_cst
   ; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+  %4 = atomicrmw or ptr addrspace(1) @ui, i32 42 syncscope("singlethread") seq_cst
   ; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+  %5 = atomicrmw xor ptr addrspace(1) @ui, i32 42 syncscope("singlethread") seq_cst
   ; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+  %6 = atomicrmw and ptr addrspace(1) @ui, i32 42 syncscope("singlethread") seq_cst
   ; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+  %7 = atomicrmw max ptr addrspace(1) @ui, i32 42 syncscope("singlethread") seq_cst
   ; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+  %8 = atomicrmw min ptr addrspace(1) @ui, i32 42 syncscope("singlethread") seq_cst
   ; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+  %9 = atomicrmw umax ptr addrspace(1) @ui, i32 42 syncscope("singlethread") seq_cst
   ; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 syncscope("singlethread") seq_cst
+  %10 = atomicrmw umin ptr addrspace(1) @ui, i32 42 syncscope("singlethread") seq_cst
   ; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Invocation:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
 
   ret void
@@ -52,27 +52,27 @@ entry:
 
 define dso_local spir_func void @test_subgroup_atomicrmw() local_unnamed_addr {
 entry:
-  %0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+  %0 = atomicrmw xchg ptr addrspace(1) @ui, i32 42 syncscope("subgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 syncscope("subgroup") seq_cst
+  %1 = atomicrmw xchg ptr addrspace(1) @f, float 42.000000e+00 syncscope("subgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicExchange %[[#Float:]] %[[#FPPointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#FPValue:]]
-  %2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+  %2 = atomicrmw add ptr addrspace(1) @ui, i32 42 syncscope("subgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+  %3 = atomicrmw sub ptr addrspace(1) @ui, i32 42 syncscope("subgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+  %4 = atomicrmw or ptr addrspace(1) @ui, i32 42 syncscope("subgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+  %5 = atomicrmw xor ptr addrspace(1) @ui, i32 42 syncscope("subgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+  %6 = atomicrmw and ptr addrspace(1) @ui, i32 42 syncscope("subgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+  %7 = atomicrmw max ptr addrspace(1) @ui, i32 42 syncscope("subgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+  %8 = atomicrmw min ptr addrspace(1) @ui, i32 42 syncscope("subgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+  %9 = atomicrmw umax ptr addrspace(1) @ui, i32 42 syncscope("subgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 syncscope("subgroup") seq_cst
+  %10 = atomicrmw umin ptr addrspace(1) @ui, i32 42 syncscope("subgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Subgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
 
   ret void
@@ -80,27 +80,27 @@ entry:
 
 define dso_local spir_func void @test_workgroup_atomicrmw() local_unnamed_addr {
 entry:
-  %0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+  %0 = atomicrmw xchg ptr addrspace(1) @ui, i32 42 syncscope("workgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 syncscope("workgroup") seq_cst
+  %1 = atomicrmw xchg ptr addrspace(1) @f, float 42.000000e+00 syncscope("workgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicExchange %[[#Float:]] %[[#FPPointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#FPValue:]]
-  %2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+  %2 = atomicrmw add ptr addrspace(1) @ui, i32 42 syncscope("workgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+  %3 = atomicrmw sub ptr addrspace(1) @ui, i32 42 syncscope("workgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+  %4 = atomicrmw or ptr addrspace(1) @ui, i32 42 syncscope("workgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+  %5 = atomicrmw xor ptr addrspace(1) @ui, i32 42 syncscope("workgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+  %6 = atomicrmw and ptr addrspace(1) @ui, i32 42 syncscope("workgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+  %7 = atomicrmw max ptr addrspace(1) @ui, i32 42 syncscope("workgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+  %8 = atomicrmw min ptr addrspace(1) @ui, i32 42 syncscope("workgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+  %9 = atomicrmw umax ptr addrspace(1) @ui, i32 42 syncscope("workgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 syncscope("workgroup") seq_cst
+  %10 = atomicrmw umin ptr addrspace(1) @ui, i32 42 syncscope("workgroup") seq_cst
   ; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Workgroup:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
 
   ret void
@@ -108,27 +108,27 @@ entry:
 
 define dso_local spir_func void @test_device_atomicrmw() local_unnamed_addr {
 entry:
-  %0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+  %0 = atomicrmw xchg ptr addrspace(1) @ui, i32 42 syncscope("device") seq_cst
   ; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 syncscope("device") seq_cst
+  %1 = atomicrmw xchg ptr addrspace(1) @f, float 42.000000e+00 syncscope("device") seq_cst
   ; CHECK: %[[#]] = OpAtomicExchange %[[#Float:]] %[[#FPPointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#FPValue:]]
-  %2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+  %2 = atomicrmw add ptr addrspace(1) @ui, i32 42 syncscope("device") seq_cst
   ; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+  %3 = atomicrmw sub ptr addrspace(1) @ui, i32 42 syncscope("device") seq_cst
   ; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+  %4 = atomicrmw or ptr addrspace(1) @ui, i32 42 syncscope("device") seq_cst
   ; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+  %5 = atomicrmw xor ptr addrspace(1) @ui, i32 42 syncscope("device") seq_cst
   ; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+  %6 = atomicrmw and ptr addrspace(1) @ui, i32 42 syncscope("device") seq_cst
   ; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+  %7 = atomicrmw max ptr addrspace(1) @ui, i32 42 syncscope("device") seq_cst
   ; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+  %8 = atomicrmw min ptr addrspace(1) @ui, i32 42 syncscope("device") seq_cst
   ; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+  %9 = atomicrmw umax ptr addrspace(1) @ui, i32 42 syncscope("device") seq_cst
   ; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 syncscope("device") seq_cst
+  %10 = atomicrmw umin ptr addrspace(1) @ui, i32 42 syncscope("device") seq_cst
   ; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer:]] %[[#Scope_Device:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
 
   ret void
@@ -136,27 +136,27 @@ entry:
 
 define dso_local spir_func void @test_all_svm_devices_atomicrmw() local_unnamed_addr {
 entry:
-  %0 = atomicrmw xchg i32 addrspace(1)* @ui, i32 42 seq_cst
+  %0 = atomicrmw xchg ptr addrspace(1) @ui, i32 42 seq_cst
   ; CHECK: %[[#]] = OpAtomicExchange %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %1 = atomicrmw xchg float addrspace(1)* @f, float 42.000000e+00 seq_cst
+  %1 = atomicrmw xchg ptr addrspace(1) @f, float 42.000000e+00 seq_cst
   ; CHECK: %[[#]] = OpAtomicExchange %[[#Float:]] %[[#FPPointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#FPValue:]]
-  %2 = atomicrmw add i32 addrspace(1)* @ui, i32 42 seq_cst
+  %2 = atomicrmw add ptr addrspace(1) @ui, i32 42 seq_cst
   ; CHECK: %[[#]] = OpAtomicIAdd %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %3 = atomicrmw sub i32 addrspace(1)* @ui, i32 42 seq_cst
+  %3 = atomicrmw sub ptr addrspace(1) @ui, i32 42 seq_cst
   ; CHECK: %[[#]] = OpAtomicISub %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %4 = atomicrmw or i32 addrspace(1)* @ui, i32 42 seq_cst
+  %4 = atomicrmw or ptr addrspace(1) @ui, i32 42 seq_cst
   ; CHECK: %[[#]] = OpAtomicOr %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %5 = atomicrmw xor i32 addrspace(1)* @ui, i32 42 seq_cst
+  %5 = atomicrmw xor ptr addrspace(1) @ui, i32 42 seq_cst
   ; CHECK: %[[#]] = OpAtomicXor %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %6 = atomicrmw and i32 addrspace(1)* @ui, i32 42 seq_cst
+  %6 = atomicrmw and ptr addrspace(1) @ui, i32 42 seq_cst
   ; CHECK: %[[#]] = OpAtomicAnd %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %7 = atomicrmw max i32 addrspace(1)* @ui, i32 42 seq_cst
+  %7 = atomicrmw max ptr addrspace(1) @ui, i32 42 seq_cst
   ; CHECK: %[[#]] = OpAtomicSMax %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %8 = atomicrmw min i32 addrspace(1)* @ui, i32 42 seq_cst
+  %8 = atomicrmw min ptr addrspace(1) @ui, i32 42 seq_cst
   ; CHECK: %[[#]] = OpAtomicSMin %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %9 = atomicrmw umax i32 addrspace(1)* @ui, i32 42 seq_cst
+  %9 = atomicrmw umax ptr addrspace(1) @ui, i32 42 seq_cst
   ; CHECK: %[[#]] = OpAtomicUMax %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
-  %10 = atomicrmw umin i32 addrspace(1)* @ui, i32 42 seq_cst
+  %10 = atomicrmw umin ptr addrspace(1) @ui, i32 42 seq_cst
   ; CHECK: %[[#]] = OpAtomicUMin %[[#Int]] %[[#Pointer:]] %[[#Scope_CrossDevice:]] %[[#MemSem_SeqCst:]] %[[#Value:]]
 
   ret void
diff --git a/llvm/test/CodeGen/SPIRV/select-builtin.ll b/llvm/test/CodeGen/SPIRV/select-builtin.ll
index b4601d33dd38f..a616cc6193718 100644
--- a/llvm/test/CodeGen/SPIRV/select-builtin.ll
+++ b/llvm/test/CodeGen/SPIRV/select-builtin.ll
@@ -6,11 +6,11 @@
 
 ;; LLVM IR was generated with -cl-std=c++ option
 
-define spir_kernel void @test(i32 %op1, i32 %op2, i32 addrspace(1)* %out) {
+define spir_kernel void @test(i32 %op1, i32 %op2, ptr addrspace(1) %out) {
 entry:
   %0 = trunc i8 undef to i1
   %call = call spir_func i32 @_Z14__spirv_Selectbii(i1 zeroext %0, i32 %op1, i32 %op2)
-  store i32 %call, i32 addrspace(1)* %out
+  store i32 %call, ptr addrspace(1) %out
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/simple.ll b/llvm/test/CodeGen/SPIRV/simple.ll
index 63c15968c7253..7f772ff2e0e97 100644
--- a/llvm/test/CodeGen/SPIRV/simple.ll
+++ b/llvm/test/CodeGen/SPIRV/simple.ll
@@ -4,28 +4,28 @@
 ;; Support of doubles is required.
 ; CHECK: OpCapability Float64
 ; CHECK: "fun01"
-define spir_kernel void @fun01(i32 addrspace(1)* noalias %a, i32 addrspace(1)* %b, i32 %c) {
+define spir_kernel void @fun01(ptr addrspace(1) noalias %a, ptr addrspace(1) %b, i32 %c) {
 entry:
-  %a.addr = alloca i32 addrspace(1)*, align 8
-  %b.addr = alloca i32 addrspace(1)*, align 8
+  %a.addr = alloca ptr addrspace(1), align 8
+  %b.addr = alloca ptr addrspace(1), align 8
   %c.addr = alloca i32, align 4
-  store i32 addrspace(1)* %a, i32 addrspace(1)** %a.addr, align 8
-  store i32 addrspace(1)* %b, i32 addrspace(1)** %b.addr, align 8
-  store i32 %c, i32* %c.addr, align 4
-  %0 = load i32 addrspace(1)*, i32 addrspace(1)** %b.addr, align 8
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 0
-  %1 = load i32, i32 addrspace(1)* %arrayidx, align 4
-  %2 = load i32 addrspace(1)*, i32 addrspace(1)** %a.addr, align 8
-  %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %2, i64 0
-  store i32 %1, i32 addrspace(1)* %arrayidx1, align 4
-  %3 = load i32 addrspace(1)*, i32 addrspace(1)** %b.addr, align 8
-  %cmp = icmp ugt i32 addrspace(1)* %3, null
+  store ptr addrspace(1) %a, ptr %a.addr, align 8
+  store ptr addrspace(1) %b, ptr %b.addr, align 8
+  store i32 %c, ptr %c.addr, align 4
+  %0 = load ptr addrspace(1), ptr %b.addr, align 8
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %0, i64 0
+  %1 = load i32, ptr addrspace(1) %arrayidx, align 4
+  %2 = load ptr addrspace(1), ptr %a.addr, align 8
+  %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %2, i64 0
+  store i32 %1, ptr addrspace(1) %arrayidx1, align 4
+  %3 = load ptr addrspace(1), ptr %b.addr, align 8
+  %cmp = icmp ugt ptr addrspace(1) %3, null
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %4 = load i32 addrspace(1)*, i32 addrspace(1)** %a.addr, align 8
-  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %4, i64 0
-  store i32 2, i32 addrspace(1)* %arrayidx2, align 4
+  %4 = load ptr addrspace(1), ptr %a.addr, align 8
+  %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %4, i64 0
+  store i32 2, ptr addrspace(1) %arrayidx2, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
@@ -33,49 +33,49 @@ if.end:                                           ; preds = %if.then, %entry
 }
 
 ; CHECK: "fun02"
-define spir_kernel void @fun02(double addrspace(1)* %a, double addrspace(1)* %b, i32 %c) {
+define spir_kernel void @fun02(ptr addrspace(1) %a, ptr addrspace(1) %b, i32 %c) {
 entry:
-  %a.addr = alloca double addrspace(1)*, align 8
-  %b.addr = alloca double addrspace(1)*, align 8
+  %a.addr = alloca ptr addrspace(1), align 8
+  %b.addr = alloca ptr addrspace(1), align 8
   %c.addr = alloca i32, align 4
-  store double addrspace(1)* %a, double addrspace(1)** %a.addr, align 8
-  store double addrspace(1)* %b, double addrspace(1)** %b.addr, align 8
-  store i32 %c, i32* %c.addr, align 4
-  %0 = load i32, i32* %c.addr, align 4
+  store ptr addrspace(1) %a, ptr %a.addr, align 8
+  store ptr addrspace(1) %b, ptr %b.addr, align 8
+  store i32 %c, ptr %c.addr, align 4
+  %0 = load i32, ptr %c.addr, align 4
   %idxprom = sext i32 %0 to i64
-  %1 = load double addrspace(1)*, double addrspace(1)** %b.addr, align 8
-  %arrayidx = getelementptr inbounds double, double addrspace(1)* %1, i64 %idxprom
-  %2 = load double, double addrspace(1)* %arrayidx, align 8
-  %3 = load i32, i32* %c.addr, align 4
+  %1 = load ptr addrspace(1), ptr %b.addr, align 8
+  %arrayidx = getelementptr inbounds double, ptr addrspace(1) %1, i64 %idxprom
+  %2 = load double, ptr addrspace(1) %arrayidx, align 8
+  %3 = load i32, ptr %c.addr, align 4
   %idxprom1 = sext i32 %3 to i64
-  %4 = load double addrspace(1)*, double addrspace(1)** %a.addr, align 8
-  %arrayidx2 = getelementptr inbounds double, double addrspace(1)* %4, i64 %idxprom1
-  store double %2, double addrspace(1)* %arrayidx2, align 8
+  %4 = load ptr addrspace(1), ptr %a.addr, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr addrspace(1) %4, i64 %idxprom1
+  store double %2, ptr addrspace(1) %arrayidx2, align 8
   ret void
 }
 
 ; CHECK: "test_builtin"
-define spir_func void @test_builtin(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
+define spir_func void @test_builtin(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 entry:
-  %in.addr = alloca i32 addrspace(1)*, align 8
-  %out.addr = alloca i32 addrspace(1)*, align 8
+  %in.addr = alloca ptr addrspace(1), align 8
+  %out.addr = alloca ptr addrspace(1), align 8
   %n = alloca i32, align 4
-  store i32 addrspace(1)* %in, i32 addrspace(1)** %in.addr, align 8
-  store i32 addrspace(1)* %out, i32 addrspace(1)** %out.addr, align 8
+  store ptr addrspace(1) %in, ptr %in.addr, align 8
+  store ptr addrspace(1) %out, ptr %out.addr, align 8
   %call = call spir_func i64 @_Z13get_global_idj(i32 0)
   %conv = trunc i64 %call to i32
-  store i32 %conv, i32* %n, align 4
-  %0 = load i32, i32* %n, align 4
+  store i32 %conv, ptr %n, align 4
+  %0 = load i32, ptr %n, align 4
   %idxprom = sext i32 %0 to i64
-  %1 = load i32 addrspace(1)*, i32 addrspace(1)** %in.addr, align 8
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %1, i64 %idxprom
-  %2 = load i32, i32 addrspace(1)* %arrayidx, align 4
+  %1 = load ptr addrspace(1), ptr %in.addr, align 8
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %1, i64 %idxprom
+  %2 = load i32, ptr addrspace(1) %arrayidx, align 4
   %call1 = call spir_func i32 @_Z3absi(i32 %2)
-  %3 = load i32, i32* %n, align 4
+  %3 = load i32, ptr %n, align 4
   %idxprom2 = sext i32 %3 to i64
-  %4 = load i32 addrspace(1)*, i32 addrspace(1)** %out.addr, align 8
-  %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %4, i64 %idxprom2
-  store i32 %call1, i32 addrspace(1)* %arrayidx3, align 4
+  %4 = load ptr addrspace(1), ptr %out.addr, align 8
+  %arrayidx3 = getelementptr inbounds i32, ptr addrspace(1) %4, i64 %idxprom2
+  store i32 %call1, ptr addrspace(1) %arrayidx3, align 4
   ret void
 }
 
@@ -89,33 +89,33 @@ declare spir_func i32 @_Z3absi(i32)
 define spir_func i32 @myabs(i32 %x) {
 entry:
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  %0 = load i32, i32* %x.addr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
   %call = call spir_func i32 @_Z3absi(i32 %0)
   ret i32 %call
 }
 
 ; CHECK: "test_function_call"
-define spir_func void @test_function_call(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
+define spir_func void @test_function_call(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 entry:
-  %in.addr = alloca i32 addrspace(1)*, align 8
-  %out.addr = alloca i32 addrspace(1)*, align 8
+  %in.addr = alloca ptr addrspace(1), align 8
+  %out.addr = alloca ptr addrspace(1), align 8
   %n = alloca i32, align 4
-  store i32 addrspace(1)* %in, i32 addrspace(1)** %in.addr, align 8
-  store i32 addrspace(1)* %out, i32 addrspace(1)** %out.addr, align 8
+  store ptr addrspace(1) %in, ptr %in.addr, align 8
+  store ptr addrspace(1) %out, ptr %out.addr, align 8
   %call = call spir_func i64 @_Z13get_global_idj(i32 0)
   %conv = trunc i64 %call to i32
-  store i32 %conv, i32* %n, align 4
-  %0 = load i32, i32* %n, align 4
+  store i32 %conv, ptr %n, align 4
+  %0 = load i32, ptr %n, align 4
   %idxprom = sext i32 %0 to i64
-  %1 = load i32 addrspace(1)*, i32 addrspace(1)** %in.addr, align 8
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %1, i64 %idxprom
-  %2 = load i32, i32 addrspace(1)* %arrayidx, align 4
+  %1 = load ptr addrspace(1), ptr %in.addr, align 8
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %1, i64 %idxprom
+  %2 = load i32, ptr addrspace(1) %arrayidx, align 4
   %call1 = call spir_func i32 @myabs(i32 %2)
-  %3 = load i32, i32* %n, align 4
+  %3 = load i32, ptr %n, align 4
   %idxprom2 = sext i32 %3 to i64
-  %4 = load i32 addrspace(1)*, i32 addrspace(1)** %out.addr, align 8
-  %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %4, i64 %idxprom2
-  store i32 %call1, i32 addrspace(1)* %arrayidx3, align 4
+  %4 = load ptr addrspace(1), ptr %out.addr, align 8
+  %arrayidx3 = getelementptr inbounds i32, ptr addrspace(1) %4, i64 %idxprom2
+  store i32 %call1, ptr addrspace(1) %arrayidx3, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/sitofp-with-bool.ll b/llvm/test/CodeGen/SPIRV/sitofp-with-bool.ll
index 2261778658657..0bf3f9f2872f2 100644
--- a/llvm/test/CodeGen/SPIRV/sitofp-with-bool.ll
+++ b/llvm/test/CodeGen/SPIRV/sitofp-with-bool.ll
@@ -19,6 +19,6 @@ define dso_local spir_kernel void @K(ptr addrspace(1) nocapture %A, i32 %B) loca
 entry:
   %cmp = icmp sgt i32 %B, 0
   %conv = sitofp i1 %cmp to float
-  store float %conv, float addrspace(1)* %A, align 4
+  store float %conv, ptr addrspace(1) %A, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/spec_const_decoration.ll b/llvm/test/CodeGen/SPIRV/spec_const_decoration.ll
index d897ccd02ed96..2e3b1167ccdeb 100644
--- a/llvm/test/CodeGen/SPIRV/spec_const_decoration.ll
+++ b/llvm/test/CodeGen/SPIRV/spec_const_decoration.ll
@@ -12,11 +12,11 @@
 
 $_ZTS6kernel = comdat any
 
-define weak_odr dso_local spir_kernel void @_ZTS6kernel(i8 addrspace(1)* %_arg_, %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range"* byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_1, %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range"* byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_2, %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range"* byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_3) local_unnamed_addr comdat {
+define weak_odr dso_local spir_kernel void @_ZTS6kernel(ptr addrspace(1) %_arg_, ptr byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_1, ptr byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_2, ptr byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_3) local_unnamed_addr comdat {
 entry:
-  %0 = getelementptr inbounds %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range", %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range"* %_arg_3, i64 0, i32 0, i32 0, i64 0
-  %1 = addrspacecast i64* %0 to i64 addrspace(4)*
-  %2 = load i64, i64 addrspace(4)* %1, align 8
+  %0 = getelementptr inbounds %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range", ptr %_arg_3, i64 0, i32 0, i32 0, i64 0
+  %1 = addrspacecast ptr %0 to ptr addrspace(4)
+  %2 = load i64, ptr addrspace(4) %1, align 8
   br label %for.cond.i.i
 
 for.cond.i.i:                                     ; preds = %for.body.i.i, %entry
@@ -29,9 +29,9 @@ for.body.i.i:                                     ; preds = %for.cond.i.i
   br label %for.cond.i.i
 
 _ZZZ4mainENKUlRN2cl4sycl7handlerEE_clES2_ENKUlNS0_14kernel_handlerEE_clES4_.exit: ; preds = %for.cond.i.i
-  %add.ptr.i = getelementptr inbounds i8, i8 addrspace(1)* %_arg_, i64 %2
-  %arrayidx.ascast.i.i = addrspacecast i8 addrspace(1)* %add.ptr.i to i8 addrspace(4)*
-  store i8 %value.0.i.i, i8 addrspace(4)* %arrayidx.ascast.i.i, align 1
+  %add.ptr.i = getelementptr inbounds i8, ptr addrspace(1) %_arg_, i64 %2
+  %arrayidx.ascast.i.i = addrspacecast ptr addrspace(1) %add.ptr.i to ptr addrspace(4)
+  store i8 %value.0.i.i, ptr addrspace(4) %arrayidx.ascast.i.i, align 1
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/spirv-load-store.ll b/llvm/test/CodeGen/SPIRV/spirv-load-store.ll
index 9188617312466..ae63e9c00cad3 100644
--- a/llvm/test/CodeGen/SPIRV/spirv-load-store.ll
+++ b/llvm/test/CodeGen/SPIRV/spirv-load-store.ll
@@ -10,12 +10,12 @@
 ; CHECK: %[[#PTRTOFLOAT:]] = OpBitcast %[[#TYFLOATPTR]] %[[#PTRTOLONG]]
 ; CHECK: OpLoad %[[#TYFLOAT]] %[[#PTRTOFLOAT]]
 
-define weak_odr dso_local spir_kernel void @foo(i32 addrspace(1)* %var) {
+define weak_odr dso_local spir_kernel void @foo(ptr addrspace(1) %var) {
 entry:
-  tail call spir_func void @_Z13__spirv_StorePiiii(i32 addrspace(1)* %var, i32 42, i32 3, i32 4)
-  %value = tail call spir_func double @_Z12__spirv_LoadPi(i32 addrspace(1)* %var)
+  tail call spir_func void @_Z13__spirv_StorePiiii(ptr addrspace(1) %var, i32 42, i32 3, i32 4)
+  %value = tail call spir_func double @_Z12__spirv_LoadPi(ptr addrspace(1) %var)
   ret void
 }
 
-declare dso_local spir_func double @_Z12__spirv_LoadPi(i32 addrspace(1)*) local_unnamed_addr
-declare dso_local spir_func void @_Z13__spirv_StorePiiii(i32 addrspace(1)*, i32, i32, i32) local_unnamed_addr
+declare dso_local spir_func double @_Z12__spirv_LoadPi(ptr addrspace(1)) local_unnamed_addr
+declare dso_local spir_func void @_Z13__spirv_StorePiiii(ptr addrspace(1), i32, i32, i32) local_unnamed_addr
diff --git a/llvm/test/CodeGen/SPIRV/spirv-tools-dis.ll b/llvm/test/CodeGen/SPIRV/spirv-tools-dis.ll
index e6cbf7644408a..c86513d78cb87 100644
--- a/llvm/test/CodeGen/SPIRV/spirv-tools-dis.ll
+++ b/llvm/test/CodeGen/SPIRV/spirv-tools-dis.ll
@@ -3,11 +3,11 @@
 ; CHECK: %[[#]] = OpExtInstImport "OpenCL.std"
 ; CHECK: %[[#]] = OpTypeInt 32 0
 
-define spir_kernel void @foo(i32 addrspace(1)* %a) {
+define spir_kernel void @foo(ptr addrspace(1) %a) {
 entry:
-  %a.addr = alloca i32 addrspace(1)*, align 4
-  store i32 addrspace(1)* %a, i32 addrspace(1)** %a.addr, align 4
-  %0 = load i32 addrspace(1)*, i32 addrspace(1)** %a.addr, align 4
-  store i32 0, i32 addrspace(1)* %0, align 4
+  %a.addr = alloca ptr addrspace(1), align 4
+  store ptr addrspace(1) %a, ptr %a.addr, align 4
+  %0 = load ptr addrspace(1), ptr %a.addr, align 4
+  store i32 0, ptr addrspace(1) %0, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll b/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll
index fb550bb01a3a2..c99faf87f7241 100644
--- a/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll
+++ b/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
 
-define spir_kernel void @k(i32 addrspace(1)* %a) !kernel_arg_type_qual !7 !spirv.ParameterDecorations !10 {
+define spir_kernel void @k(ptr addrspace(1) %a) !kernel_arg_type_qual !7 !spirv.ParameterDecorations !10 {
 entry:
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/store.ll b/llvm/test/CodeGen/SPIRV/store.ll
index 386a605dc4aa5..0bad0a579e0ae 100644
--- a/llvm/test/CodeGen/SPIRV/store.ll
+++ b/llvm/test/CodeGen/SPIRV/store.ll
@@ -1,12 +1,12 @@
 ; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
 
 ; CHECK: "foo"
-define spir_kernel void @foo(i32 addrspace(1)* %a) {
+define spir_kernel void @foo(ptr addrspace(1) %a) {
 entry:
-  %a.addr = alloca i32 addrspace(1)*, align 4
-  store i32 addrspace(1)* %a, i32 addrspace(1)** %a.addr, align 4
-  %0 = load i32 addrspace(1)*, i32 addrspace(1)** %a.addr, align 4
+  %a.addr = alloca ptr addrspace(1), align 4
+  store ptr addrspace(1) %a, ptr %a.addr, align 4
+  %0 = load ptr addrspace(1), ptr %a.addr, align 4
 ; CHECK: OpStore %[[#]] %[[#]] Aligned 4
-  store i32 0, i32 addrspace(1)* %0, align 4
+  store i32 0, ptr addrspace(1) %0, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll b/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll
index 24ef0bf425d21..d1414233fb308 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/AtomicCompareExchangeExplicit_cl20.ll
@@ -40,21 +40,21 @@
 ; CHECK-SPIRV: %[[#]] = OpAtomicCompareExchangeWeak %[[#]] %[[#]] %[[#DeviceScope]] %[[#ReleaseMemSem]] %[[#RelaxedMemSem]]
 ; CHECK-SPIRV: %[[#]] = OpAtomicCompareExchangeWeak %[[#]] %[[#]] %[[#WorkgroupScope]] %[[#AcqRelMemSem]] %[[#RelaxedMemSem]]
 
-define dso_local spir_kernel void @testAtomicCompareExchangeExplicit_cl20(i32 addrspace(1)* noundef %object, i32 addrspace(1)* noundef %expected, i32 noundef %desired) local_unnamed_addr {
+define dso_local spir_kernel void @testAtomicCompareExchangeExplicit_cl20(ptr addrspace(1) noundef %object, ptr addrspace(1) noundef %expected, i32 noundef %desired) local_unnamed_addr {
 entry:
-  %0 = addrspacecast i32 addrspace(1)* %object to i32 addrspace(4)*
-  %1 = addrspacecast i32 addrspace(1)* %expected to i32 addrspace(4)*
-  %call = call spir_func zeroext i1 @_Z39atomic_compare_exchange_strong_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_(i32 addrspace(4)* noundef %0, i32 addrspace(4)* noundef %1, i32 noundef %desired, i32 noundef 3, i32 noundef 0)
-  %call1 = call spir_func zeroext i1 @_Z39atomic_compare_exchange_strong_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_12memory_scope(i32 addrspace(4)* noundef %0, i32 addrspace(4)* noundef %1, i32 noundef %desired, i32 noundef 4, i32 noundef 0, i32 noundef 1)
-  %call2 = call spir_func zeroext i1 @_Z37atomic_compare_exchange_weak_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_(i32 addrspace(4)* noundef %0, i32 addrspace(4)* noundef %1, i32 noundef %desired, i32 noundef 3, i32 noundef 0)
-  %call3 = call spir_func zeroext i1 @_Z37atomic_compare_exchange_weak_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_12memory_scope(i32 addrspace(4)* noundef %0, i32 addrspace(4)* noundef %1, i32 noundef %desired, i32 noundef 4, i32 noundef 0, i32 noundef 1)
+  %0 = addrspacecast ptr addrspace(1) %object to ptr addrspace(4)
+  %1 = addrspacecast ptr addrspace(1) %expected to ptr addrspace(4)
+  %call = call spir_func zeroext i1 @_Z39atomic_compare_exchange_strong_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_(ptr addrspace(4) noundef %0, ptr addrspace(4) noundef %1, i32 noundef %desired, i32 noundef 3, i32 noundef 0)
+  %call1 = call spir_func zeroext i1 @_Z39atomic_compare_exchange_strong_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_12memory_scope(ptr addrspace(4) noundef %0, ptr addrspace(4) noundef %1, i32 noundef %desired, i32 noundef 4, i32 noundef 0, i32 noundef 1)
+  %call2 = call spir_func zeroext i1 @_Z37atomic_compare_exchange_weak_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_(ptr addrspace(4) noundef %0, ptr addrspace(4) noundef %1, i32 noundef %desired, i32 noundef 3, i32 noundef 0)
+  %call3 = call spir_func zeroext i1 @_Z37atomic_compare_exchange_weak_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_12memory_scope(ptr addrspace(4) noundef %0, ptr addrspace(4) noundef %1, i32 noundef %desired, i32 noundef 4, i32 noundef 0, i32 noundef 1)
   ret void
 }
 
-declare spir_func zeroext i1 @_Z39atomic_compare_exchange_strong_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_(i32 addrspace(4)* noundef, i32 addrspace(4)* noundef, i32 noundef, i32 noundef, i32 noundef) local_unnamed_addr
+declare spir_func zeroext i1 @_Z39atomic_compare_exchange_strong_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_(ptr addrspace(4) noundef, ptr addrspace(4) noundef, i32 noundef, i32 noundef, i32 noundef) local_unnamed_addr
 
-declare spir_func zeroext i1 @_Z39atomic_compare_exchange_strong_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_12memory_scope(i32 addrspace(4)* noundef, i32 addrspace(4)* noundef, i32 noundef, i32 noundef, i32 noundef, i32 noundef) local_unnamed_addr
+declare spir_func zeroext i1 @_Z39atomic_compare_exchange_strong_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_12memory_scope(ptr addrspace(4) noundef, ptr addrspace(4) noundef, i32 noundef, i32 noundef, i32 noundef, i32 noundef) local_unnamed_addr
 
-declare spir_func zeroext i1 @_Z37atomic_compare_exchange_weak_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_(i32 addrspace(4)* noundef, i32 addrspace(4)* noundef, i32 noundef, i32 noundef, i32 noundef) local_unnamed_addr
+declare spir_func zeroext i1 @_Z37atomic_compare_exchange_weak_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_(ptr addrspace(4) noundef, ptr addrspace(4) noundef, i32 noundef, i32 noundef, i32 noundef) local_unnamed_addr
 
-declare spir_func zeroext i1 @_Z37atomic_compare_exchange_weak_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_12memory_scope(i32 addrspace(4)* noundef, i32 addrspace(4)* noundef, i32 noundef, i32 noundef, i32 noundef, i32 noundef) local_unnamed_addr
+declare spir_func zeroext i1 @_Z37atomic_compare_exchange_weak_explicitPU3AS4VU7_AtomiciPU3AS4ii12memory_orderS4_12memory_scope(ptr addrspace(4) noundef, ptr addrspace(4) noundef, i32 noundef, i32 noundef, i32 noundef, i32 noundef) local_unnamed_addr
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll b/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll
index 11b0578a0c9c0..fa100e5e68895 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/BitReversePref.ll
@@ -15,20 +15,20 @@ entry:
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 define linkonce_odr dso_local spir_func i32 @_Z10BitReversei(i32 %value) comdat {
 entry:
   %value.addr = alloca i32, align 4
   %reversed = alloca i32, align 4
-  store i32 %value, i32* %value.addr, align 4
-  %0 = bitcast i32* %reversed to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
-  store i32 0, i32* %reversed, align 4
-  %1 = load i32, i32* %reversed, align 4
-  %2 = bitcast i32* %reversed to i8*
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %2)
+  store i32 %value, ptr %value.addr, align 4
+  %0 = bitcast ptr %reversed to ptr
+  call void @llvm.lifetime.start.p0(i64 4, ptr %0)
+  store i32 0, ptr %reversed, align 4
+  %1 = load i32, ptr %reversed, align 4
+  %2 = bitcast ptr %reversed to ptr
+  call void @llvm.lifetime.end.p0(i64 4, ptr %2)
   ret i32 %1
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll
index 788767fd6381c..9258a3f23bf79 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange.ll
@@ -10,8 +10,8 @@
 
 define spir_kernel void @test() {
   %ndrange = alloca %struct.ndrange_t, align 4
-  call spir_func void @_Z10ndrange_1Djj(%struct.ndrange_t* sret(%struct.ndrange_t*) %ndrange, i32 123, i32 456)
+  call spir_func void @_Z10ndrange_1Djj(ptr sret(ptr) %ndrange, i32 123, i32 456)
   ret void
 }
 
-declare spir_func void @_Z10ndrange_1Djj(%struct.ndrange_t* sret(%struct.ndrange_t*), i32, i32)
+declare spir_func void @_Z10ndrange_1Djj(ptr sret(ptr), i32, i32)
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll
index 65c992c9b28ed..8ba5e99df52ae 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/BuildNDRange_2.ll
@@ -56,28 +56,28 @@ entry:
   %tmp = alloca %struct.ndrange_t, align 8
   %lsize3 = alloca [3 x i64], align 8
   %tmp3 = alloca %struct.ndrange_t, align 8
-  %0 = bitcast [2 x i64]* %lsize2 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %0, i8* align 8 bitcast ([2 x i64]* @test_ndrange_2D3D.lsize2 to i8*), i64 16, i1 false)
-  %arraydecay = getelementptr inbounds [2 x i64], [2 x i64]* %lsize2, i64 0, i64 0
-  call spir_func void @_Z10ndrange_2DPKm(%struct.ndrange_t* sret(%struct.ndrange_t*) %tmp, i64* %arraydecay)
-  %1 = bitcast [3 x i64]* %lsize3 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %1, i8* align 8 bitcast ([3 x i64]* @test_ndrange_2D3D.lsize3 to i8*), i64 24, i1 false)
-  %arraydecay2 = getelementptr inbounds [3 x i64], [3 x i64]* %lsize3, i64 0, i64 0
-  call spir_func void @_Z10ndrange_3DPKm(%struct.ndrange_t* sret(%struct.ndrange_t*) %tmp3, i64* %arraydecay2)
+  %0 = bitcast ptr %lsize2 to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %0, ptr align 8 @test_ndrange_2D3D.lsize2, i64 16, i1 false)
+  %arraydecay = getelementptr inbounds [2 x i64], ptr %lsize2, i64 0, i64 0
+  call spir_func void @_Z10ndrange_2DPKm(ptr sret(ptr) %tmp, ptr %arraydecay)
+  %1 = bitcast ptr %lsize3 to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %1, ptr align 8 @test_ndrange_2D3D.lsize3, i64 24, i1 false)
+  %arraydecay2 = getelementptr inbounds [3 x i64], ptr %lsize3, i64 0, i64 0
+  call spir_func void @_Z10ndrange_3DPKm(ptr sret(ptr) %tmp3, ptr %arraydecay2)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
 
-declare spir_func void @_Z10ndrange_2DPKm(%struct.ndrange_t* sret(%struct.ndrange_t*), i64*)
+declare spir_func void @_Z10ndrange_2DPKm(ptr sret(ptr), ptr)
 
-declare spir_func void @_Z10ndrange_3DPKm(%struct.ndrange_t* sret(%struct.ndrange_t*), i64*)
+declare spir_func void @_Z10ndrange_3DPKm(ptr sret(ptr), ptr)
 
 define spir_func void @test_ndrange_const_2D3D() {
 entry:
   %tmp = alloca %struct.ndrange_t, align 8
   %tmp1 = alloca %struct.ndrange_t, align 8
-  call spir_func void @_Z10ndrange_2DPKm(%struct.ndrange_t* sret(%struct.ndrange_t*) %tmp, i64* getelementptr inbounds ([2 x i64], [2 x i64]* @test_ndrange_2D3D.lsize2, i64 0, i64 0))
-  call spir_func void @_Z10ndrange_3DPKm(%struct.ndrange_t* sret(%struct.ndrange_t*) %tmp1, i64* getelementptr inbounds ([3 x i64], [3 x i64]* @test_ndrange_2D3D.lsize3, i64 0, i64 0))
+  call spir_func void @_Z10ndrange_2DPKm(ptr sret(ptr) %tmp, ptr @test_ndrange_2D3D.lsize2)
+  call spir_func void @_Z10ndrange_3DPKm(ptr sret(ptr) %tmp1, ptr @test_ndrange_2D3D.lsize3)
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll b/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll
index 93aecc5331aa4..3f92c4c10a570 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/ConvertPtr.ll
@@ -7,11 +7,11 @@
 
 ; CHECK-SPIRV: OpConvertPtrToU
 
-define dso_local spir_kernel void @testConvertPtrToU(i32 addrspace(1)* noundef %a, i64 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testConvertPtrToU(ptr addrspace(1) noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
-  %0 = ptrtoint i32 addrspace(1)* %a to i32
+  %0 = ptrtoint ptr addrspace(1) %a to i32
   %1 = zext i32 %0 to i64
-  store i64 %1, i64 addrspace(1)* %res, align 8
+  store i64 %1, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -25,7 +25,7 @@ entry:
 define dso_local spir_kernel void @testConvertUToPtr(i64 noundef %a) local_unnamed_addr {
 entry:
   %conv = trunc i64 %a to i32
-  %0 = inttoptr i32 %conv to i32 addrspace(1)*
-  store i32 0, i32 addrspace(1)* %0, align 4
+  %0 = inttoptr i32 %conv to ptr addrspace(1)
+  store i32 0, ptr addrspace(1) %0, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll b/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll
index d4fc5c3280b71..532cb04549643 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/DecorationAlignment.ll
@@ -7,6 +7,6 @@
 %struct._ZTS6Struct.Struct = type { %struct._ZTS11floatStruct.floatStruct, %struct._ZTS11floatStruct.floatStruct }
 %struct._ZTS11floatStruct.floatStruct = type { float, float, float, float }
 
-define spir_func void @_ZN3FooC2Ev(%struct._ZTS6Struct.Struct addrspace(4)* align 16 %0) {
+define spir_func void @_ZN3FooC2Ev(ptr addrspace(4) align 16 %0) {
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll b/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll
index 966d83516bb3a..b03e00181bde7 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/DecorationMaxByteOffset.ll
@@ -10,14 +10,14 @@
 ; CHECK-SPIRV:     %[[#PTR_ID]] = OpFunctionParameter %[[#CHAR_PTR_T]]
 ; CHECK-SPIRV:     %[[#PTR2_ID]] = OpFunctionParameter %[[#CHAR_PTR_T]]
 
-define spir_kernel void @worker(i8 addrspace(3)* dereferenceable(12) %ptr) {
+define spir_kernel void @worker(ptr addrspace(3) dereferenceable(12) %ptr) {
 entry:
-  %ptr.addr = alloca i8 addrspace(3)*, align 4
-  store i8 addrspace(3)* %ptr, i8 addrspace(3)** %ptr.addr, align 4
+  %ptr.addr = alloca ptr addrspace(3), align 4
+  store ptr addrspace(3) %ptr, ptr %ptr.addr, align 4
   ret void
 }
 
-define spir_func void @not_a_kernel(i8 addrspace(3)* dereferenceable(123) %ptr2) {
+define spir_func void @not_a_kernel(ptr addrspace(3) dereferenceable(123) %ptr2) {
 entry:
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll b/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll
index 67c3380941887..c6a032d695e41 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/DivRem.ll
@@ -16,10 +16,10 @@
 ;;   res[0] = a / b;
 ;; }
 
-define dso_local spir_kernel void @testSDiv(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testSDiv(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %div = sdiv <2 x i32> %a, %b
-  store <2 x i32> %div, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %div, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -33,10 +33,10 @@ entry:
 ;;   res[0] = a / b;
 ;; }
 
-define dso_local spir_kernel void @testUDiv(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testUDiv(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %div = udiv <2 x i32> %a, %b
-  store <2 x i32> %div, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %div, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -50,10 +50,10 @@ entry:
 ;;   res[0] = a / b;
 ;; }
 
-define dso_local spir_kernel void @testFDiv(<2 x float> noundef %a, <2 x float> noundef %b, <2 x float> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testFDiv(<2 x float> noundef %a, <2 x float> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %div = fdiv <2 x float> %a, %b
-  store <2 x float> %div, <2 x float> addrspace(1)* %res, align 8
+  store <2 x float> %div, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -67,10 +67,10 @@ entry:
 ;;   res[0] = a % b;
 ;; }
 
-define dso_local spir_kernel void @testSRem(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testSRem(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %rem = srem <2 x i32> %a, %b
-  store <2 x i32> %rem, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %rem, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -84,9 +84,9 @@ entry:
 ;;   res[0] = a % b;
 ;; }
 
-define dso_local spir_kernel void @testUMod(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testUMod(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %rem = urem <2 x i32> %a, %b
-  store <2 x i32> %rem, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %rem, ptr addrspace(1) %res, align 8
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll b/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll
index d17e228c2ef88..970d752fbc425 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/GlobalFunAnnotate.ll
@@ -5,7 +5,7 @@
 
 @.str = private unnamed_addr constant [23 x i8] c"annotation_on_function\00", section "llvm.metadata"
 @.str.1 = private unnamed_addr constant [6 x i8] c"an.cl\00", section "llvm.metadata"
-@llvm.global.annotations = appending global [1 x { i8*, i8*, i8*, i32, i8* }] [{ i8*, i8*, i8*, i32, i8* } { i8* bitcast (void ()* @foo to i8*), i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.1, i32 0, i32 0), i32 2, i8* null }], section "llvm.metadata"
+@llvm.global.annotations = appending global [1 x { ptr, ptr, ptr, i32, ptr }] [{ ptr, ptr, ptr, i32, ptr } { ptr @foo, ptr @.str, ptr @.str.1, i32 2, ptr null }], section "llvm.metadata"
 
 define dso_local spir_func void @foo() {
 entry:
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpAllAny.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpAllAny.ll
index 113e4d7ac1a01..52c893c881e8a 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpAllAny.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpAllAny.ll
@@ -13,7 +13,7 @@
 ; CHECK-SPIRV: OpAll %[[#BoolTypeID]]
 ; CHECK-SPIRV: OpAll %[[#BoolTypeID]]
 
-define dso_local spir_func void @test_vector(i32 addrspace(4)* nocapture writeonly %out, <2 x i8> %c, <2 x i16> %s, <2 x i32> %i, <2 x i64> %l) local_unnamed_addr {
+define dso_local spir_func void @test_vector(ptr addrspace(4) nocapture writeonly %out, <2 x i8> %c, <2 x i16> %s, <2 x i32> %i, <2 x i64> %l) local_unnamed_addr {
 entry:
   %call = tail call spir_func i32 @_Z3anyDv2_c(<2 x i8> %c)
   %call1 = tail call spir_func i32 @_Z3anyDv2_s(<2 x i16> %s)
@@ -30,7 +30,7 @@ entry:
   %add11 = add nsw i32 %add9, %call10
   %call12 = tail call spir_func i32 @_Z3allDv2_l(<2 x i64> %l)
   %add13 = add nsw i32 %add11, %call12
-  store i32 %add13, i32 addrspace(4)* %out, align 4
+  store i32 %add13, ptr addrspace(4) %out, align 4
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpConstantBool.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpConstantBool.ll
index c0d61954de45b..2a3859df25229 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpConstantBool.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpConstantBool.ll
@@ -13,13 +13,13 @@ entry:
   ret i1 false
 }
 
-define spir_kernel void @test(i32 addrspace(1)* %i) {
+define spir_kernel void @test(ptr addrspace(1) %i) {
 entry:
-  %i.addr = alloca i32 addrspace(1)*, align 4
-  store i32 addrspace(1)* %i, i32 addrspace(1)** %i.addr, align 4
+  %i.addr = alloca ptr addrspace(1), align 4
+  store ptr addrspace(1) %i, ptr %i.addr, align 4
   %call = call spir_func zeroext i1 @f()
   %conv = zext i1 %call to i32
-  %0 = load i32 addrspace(1)*, i32 addrspace(1)** %i.addr, align 4
-  store i32 %conv, i32 addrspace(1)* %0, align 4
+  %0 = load ptr addrspace(1), ptr %i.addr, align 4
+  store i32 %conv, ptr addrspace(1) %0, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpDot.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpDot.ll
index c761e7729d25e..519544fc40d24 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpDot.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpDot.ll
@@ -14,10 +14,10 @@
 ; CHECK-SPIRV-NOT:   %[[#]] = OpDot %[[#]] %[[#]] %[[#]]
 ; CHECK-SPIRV:       OpFunctionEnd
 
-define spir_kernel void @testScalar(float %f, float addrspace(1)* %out) {
+define spir_kernel void @testScalar(float %f, ptr addrspace(1) %out) {
 entry:
   %call = tail call spir_func float @_Z3dotff(float %f, float %f)
-  store float %call, float addrspace(1)* %out
+  store float %call, ptr addrspace(1) %out
   ret void
 }
 
@@ -29,14 +29,14 @@ entry:
 ; CHECK-SPIRV:       %[[#]] = OpDot %[[#TyHalf]] %[[#]] %[[#]]
 ; CHECK-SPIRV:       OpFunctionEnd
 
-define spir_kernel void @testVector(<2 x float> %f, <2 x half> %h, float addrspace(1)* %out, half addrspace(1)* %outh) {
+define spir_kernel void @testVector(<2 x float> %f, <2 x half> %h, ptr addrspace(1) %out, ptr addrspace(1) %outh) {
 entry:
   %call = tail call spir_func float @_Z3dotDv2_fS_(<2 x float> %f, <2 x float> %f)
-  store float %call, float addrspace(1)* %out
+  store float %call, ptr addrspace(1) %out
   %call2 = tail call spir_func float @__spirv_Dot(<2 x float> %f, <2 x float> %f)
-  store float %call2, float addrspace(1)* %out
+  store float %call2, ptr addrspace(1) %out
   %call3 = tail call spir_func half @_Z11__spirv_DotDv2_DF16_S_(<2 x half> %h, <2 x half> %h)
-  store half %call3, half addrspace(1)* %outh
+  store half %call3, ptr addrspace(1) %outh
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpGroupAllAny.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpGroupAllAny.ll
index 0b420697763ba..2a503a189906d 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpGroupAllAny.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpGroupAllAny.ll
@@ -14,7 +14,7 @@
 ; CHECK-SPIRV: %[[#]] = OpGroupAny %[[#BoolTypeID]] %[[#]] %[[#True]]
 ; CHECK-SPIRV: %[[#]] = OpGroupAll %[[#BoolTypeID]] %[[#]] %[[#True]]
 ; CHECK-SPIRV: %[[#]] = OpGroupAny %[[#BoolTypeID]] %[[#]] %[[#False]]
-define spir_kernel void @test(i32 addrspace(1)* nocapture readnone %i) {
+define spir_kernel void @test(ptr addrspace(1) nocapture readnone %i) {
 entry:
   %call = tail call spir_func i32 @_Z14work_group_alli(i32 5)
   %call1 = tail call spir_func i32 @_Z14work_group_anyi(i32 5)
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpGroupAsyncCopy.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpGroupAsyncCopy.ll
index 11b613b956e06..3e68161105cc1 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpGroupAsyncCopy.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpGroupAsyncCopy.ll
@@ -8,95 +8,95 @@
 
 %opencl.event_t = type opaque
 
-define spir_kernel void @test_fn(<2 x i8> addrspace(1)* %src, <2 x i8> addrspace(1)* %dst, <2 x i8> addrspace(3)* %localBuffer, i32 %copiesPerWorkgroup, i32 %copiesPerWorkItem) {
+define spir_kernel void @test_fn(ptr addrspace(1) %src, ptr addrspace(1) %dst, ptr addrspace(3) %localBuffer, i32 %copiesPerWorkgroup, i32 %copiesPerWorkItem) {
 entry:
-  %src.addr = alloca <2 x i8> addrspace(1)*, align 4
-  %dst.addr = alloca <2 x i8> addrspace(1)*, align 4
-  %localBuffer.addr = alloca <2 x i8> addrspace(3)*, align 4
+  %src.addr = alloca ptr addrspace(1), align 4
+  %dst.addr = alloca ptr addrspace(1), align 4
+  %localBuffer.addr = alloca ptr addrspace(3), align 4
   %copiesPerWorkgroup.addr = alloca i32, align 4
   %copiesPerWorkItem.addr = alloca i32, align 4
   %i = alloca i32, align 4
-  %event = alloca %opencl.event_t*, align 4
-  store <2 x i8> addrspace(1)* %src, <2 x i8> addrspace(1)** %src.addr, align 4
-  store <2 x i8> addrspace(1)* %dst, <2 x i8> addrspace(1)** %dst.addr, align 4
-  store <2 x i8> addrspace(3)* %localBuffer, <2 x i8> addrspace(3)** %localBuffer.addr, align 4
-  store i32 %copiesPerWorkgroup, i32* %copiesPerWorkgroup.addr, align 4
-  store i32 %copiesPerWorkItem, i32* %copiesPerWorkItem.addr, align 4
-  store i32 0, i32* %i, align 4
+  %event = alloca ptr, align 4
+  store ptr addrspace(1) %src, ptr %src.addr, align 4
+  store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+  store ptr addrspace(3) %localBuffer, ptr %localBuffer.addr, align 4
+  store i32 %copiesPerWorkgroup, ptr %copiesPerWorkgroup.addr, align 4
+  store i32 %copiesPerWorkItem, ptr %copiesPerWorkItem.addr, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %0 = load i32, i32* %i, align 4
-  %1 = load i32, i32* %copiesPerWorkItem.addr, align 4
+  %0 = load i32, ptr %i, align 4
+  %1 = load i32, ptr %copiesPerWorkItem.addr, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
   %call = call spir_func i32 @_Z12get_local_idj(i32 0)
-  %2 = load i32, i32* %copiesPerWorkItem.addr, align 4
+  %2 = load i32, ptr %copiesPerWorkItem.addr, align 4
   %mul = mul i32 %call, %2
-  %3 = load i32, i32* %i, align 4
+  %3 = load i32, ptr %i, align 4
   %add = add i32 %mul, %3
-  %4 = load <2 x i8> addrspace(3)*, <2 x i8> addrspace(3)** %localBuffer.addr, align 4
-  %arrayidx = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %4, i32 %add
-  store <2 x i8> zeroinitializer, <2 x i8> addrspace(3)* %arrayidx, align 2
+  %4 = load ptr addrspace(3), ptr %localBuffer.addr, align 4
+  %arrayidx = getelementptr inbounds <2 x i8>, ptr addrspace(3) %4, i32 %add
+  store <2 x i8> zeroinitializer, ptr addrspace(3) %arrayidx, align 2
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
-  %5 = load i32, i32* %i, align 4
+  %5 = load i32, ptr %i, align 4
   %inc = add nsw i32 %5, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 for.end:                                          ; preds = %for.cond
   call spir_func void @_Z7barrierj(i32 1)
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond1
 
 for.cond1:                                        ; preds = %for.inc12, %for.end
-  %6 = load i32, i32* %i, align 4
-  %7 = load i32, i32* %copiesPerWorkItem.addr, align 4
+  %6 = load i32, ptr %i, align 4
+  %7 = load i32, ptr %copiesPerWorkItem.addr, align 4
   %cmp2 = icmp slt i32 %6, %7
   br i1 %cmp2, label %for.body3, label %for.end14
 
 for.body3:                                        ; preds = %for.cond1
   %call4 = call spir_func i32 @_Z13get_global_idj(i32 0)
-  %8 = load i32, i32* %copiesPerWorkItem.addr, align 4
+  %8 = load i32, ptr %copiesPerWorkItem.addr, align 4
   %mul5 = mul i32 %call4, %8
-  %9 = load i32, i32* %i, align 4
+  %9 = load i32, ptr %i, align 4
   %add6 = add i32 %mul5, %9
-  %10 = load <2 x i8> addrspace(1)*, <2 x i8> addrspace(1)** %src.addr, align 4
-  %arrayidx7 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %10, i32 %add6
-  %11 = load <2 x i8>, <2 x i8> addrspace(1)* %arrayidx7, align 2
+  %10 = load ptr addrspace(1), ptr %src.addr, align 4
+  %arrayidx7 = getelementptr inbounds <2 x i8>, ptr addrspace(1) %10, i32 %add6
+  %11 = load <2 x i8>, ptr addrspace(1) %arrayidx7, align 2
   %call8 = call spir_func i32 @_Z12get_local_idj(i32 0)
-  %12 = load i32, i32* %copiesPerWorkItem.addr, align 4
+  %12 = load i32, ptr %copiesPerWorkItem.addr, align 4
   %mul9 = mul i32 %call8, %12
-  %13 = load i32, i32* %i, align 4
+  %13 = load i32, ptr %i, align 4
   %add10 = add i32 %mul9, %13
-  %14 = load <2 x i8> addrspace(3)*, <2 x i8> addrspace(3)** %localBuffer.addr, align 4
-  %arrayidx11 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %14, i32 %add10
-  store <2 x i8> %11, <2 x i8> addrspace(3)* %arrayidx11, align 2
+  %14 = load ptr addrspace(3), ptr %localBuffer.addr, align 4
+  %arrayidx11 = getelementptr inbounds <2 x i8>, ptr addrspace(3) %14, i32 %add10
+  store <2 x i8> %11, ptr addrspace(3) %arrayidx11, align 2
   br label %for.inc12
 
 for.inc12:                                        ; preds = %for.body3
-  %15 = load i32, i32* %i, align 4
+  %15 = load i32, ptr %i, align 4
   %inc13 = add nsw i32 %15, 1
-  store i32 %inc13, i32* %i, align 4
+  store i32 %inc13, ptr %i, align 4
   br label %for.cond1
 
 for.end14:                                        ; preds = %for.cond1
   call spir_func void @_Z7barrierj(i32 1)
-  %16 = load <2 x i8> addrspace(1)*, <2 x i8> addrspace(1)** %dst.addr, align 4
-  %17 = load i32, i32* %copiesPerWorkgroup.addr, align 4
+  %16 = load ptr addrspace(1), ptr %dst.addr, align 4
+  %17 = load i32, ptr %copiesPerWorkgroup.addr, align 4
   %call15 = call spir_func i32 @_Z12get_group_idj(i32 0)
   %mul16 = mul i32 %17, %call15
-  %add.ptr = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %16, i32 %mul16
-  %18 = load <2 x i8> addrspace(3)*, <2 x i8> addrspace(3)** %localBuffer.addr, align 4
-  %19 = load i32, i32* %copiesPerWorkgroup.addr, align 4
-  %call17 = call spir_func %opencl.event_t* @_Z21async_work_group_copyPU3AS1Dv2_cPKU3AS3S_j9ocl_event(<2 x i8> addrspace(1)* %add.ptr, <2 x i8> addrspace(3)* %18, i32 %19, %opencl.event_t* null)
-  store %opencl.event_t* %call17, %opencl.event_t** %event, align 4
-  %20 = addrspacecast %opencl.event_t** %event to %opencl.event_t* addrspace(4)*
-  call spir_func void @_Z17wait_group_eventsiPU3AS49ocl_event(i32 1, %opencl.event_t* addrspace(4)* %20)
+  %add.ptr = getelementptr inbounds <2 x i8>, ptr addrspace(1) %16, i32 %mul16
+  %18 = load ptr addrspace(3), ptr %localBuffer.addr, align 4
+  %19 = load i32, ptr %copiesPerWorkgroup.addr, align 4
+  %call17 = call spir_func ptr @_Z21async_work_group_copyPU3AS1Dv2_cPKU3AS3S_j9ocl_event(ptr addrspace(1) %add.ptr, ptr addrspace(3) %18, i32 %19, ptr null)
+  store ptr %call17, ptr %event, align 4
+  %20 = addrspacecast ptr %event to ptr addrspace(4)
+  call spir_func void @_Z17wait_group_eventsiPU3AS49ocl_event(i32 1, ptr addrspace(4) %20)
   ret void
 }
 
@@ -106,8 +106,8 @@ declare spir_func void @_Z7barrierj(i32)
 
 declare spir_func i32 @_Z13get_global_idj(i32)
 
-declare spir_func %opencl.event_t* @_Z21async_work_group_copyPU3AS1Dv2_cPKU3AS3S_j9ocl_event(<2 x i8> addrspace(1)*, <2 x i8> addrspace(3)*, i32, %opencl.event_t*)
+declare spir_func ptr @_Z21async_work_group_copyPU3AS1Dv2_cPKU3AS3S_j9ocl_event(ptr addrspace(1), ptr addrspace(3), i32, ptr)
 
 declare spir_func i32 @_Z12get_group_idj(i32)
 
-declare spir_func void @_Z17wait_group_eventsiPU3AS49ocl_event(i32, %opencl.event_t* addrspace(4)*)
+declare spir_func void @_Z17wait_group_eventsiPU3AS49ocl_event(i32, ptr addrspace(4))
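
[Reviewer note, not part of the patch: after this conversion nothing in
the file refers to %opencl.event_t by name any more -- every
%opencl.event_t* became a bare ptr, e.g.:

    %opencl.event_t = type opaque   ; now unreferenced
    declare spir_func ptr @_Z21async_work_group_copyPU3AS1Dv2_cPKU3AS3S_j9ocl_event(ptr addrspace(1), ptr addrspace(3), i32, ptr)

The leftover `type opaque` declaration is harmless, but it could be
dropped in a follow-up.]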
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpImageQuerySize.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpImageQuerySize.ll
index 0c1f8eaa34a40..ce086497ff727 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpImageQuerySize.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpImageQuerySize.ll
@@ -12,7 +12,7 @@
 ; CHECK-SPIRV:     %[[#]] = OpImageQuerySizeLod %[[#]] %[[#ArrayVarID]]
 ; CHECK-SPIRV-NOT: %[[#]] = OpExtInst %[[#]] %[[#]] get_image_array_size
 
-define spir_kernel void @test_image1d(i32 addrspace(1)* nocapture %sizes, target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0) %img, target("spirv.Image", void, 5, 0, 0, 0, 0, 0, 0) %buffer, target("spirv.Image", void, 0, 0, 1, 0, 0, 0, 0) %array) {
+define spir_kernel void @test_image1d(ptr addrspace(1) nocapture %sizes, target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0) %img, target("spirv.Image", void, 5, 0, 0, 0, 0, 0, 0) %buffer, target("spirv.Image", void, 0, 0, 1, 0, 0, 0, 0) %array) {
   %1 = tail call spir_func i32 @_Z15get_image_width14ocl_image1d_ro(target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0) %img)
   %2 = tail call spir_func i32 @_Z15get_image_width21ocl_image1d_buffer_ro(target("spirv.Image", void, 5, 0, 0, 0, 0, 0, 0) %buffer)
   %3 = tail call spir_func i32 @_Z15get_image_width20ocl_image1d_array_ro(target("spirv.Image", void, 0, 0, 1, 0, 0, 0, 0) %array)
@@ -21,7 +21,7 @@ define spir_kernel void @test_image1d(i32 addrspace(1)* nocapture %sizes, target
   %6 = add nsw i32 %2, %1
   %7 = add nsw i32 %6, %3
   %8 = add nsw i32 %7, %5
-  store i32 %8, i32 addrspace(1)* %sizes, align 4
+  store i32 %8, ptr addrspace(1) %sizes, align 4
   ret void
 }
 
@@ -33,7 +33,7 @@ declare spir_func i32 @_Z15get_image_width20ocl_image1d_array_ro(target("spirv.I
 
 declare spir_func i64 @_Z20get_image_array_size20ocl_image1d_array_ro(target("spirv.Image", void, 0, 0, 1, 0, 0, 0, 0))
 
-define spir_kernel void @test_image2d(i32 addrspace(1)* nocapture %sizes, target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %img, target("spirv.Image", void, 1, 1, 0, 0, 0, 0, 0) %img_depth, target("spirv.Image", void, 1, 0, 1, 0, 0, 0, 0) %array, target("spirv.Image", void, 1, 1, 1, 0, 0, 0, 0) %array_depth) {
+define spir_kernel void @test_image2d(ptr addrspace(1) nocapture %sizes, target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %img, target("spirv.Image", void, 1, 1, 0, 0, 0, 0, 0) %img_depth, target("spirv.Image", void, 1, 0, 1, 0, 0, 0, 0) %array, target("spirv.Image", void, 1, 1, 1, 0, 0, 0, 0) %array_depth) {
   %1 = tail call spir_func i32 @_Z15get_image_width14ocl_image2d_ro(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %img)
   %2 = tail call spir_func i32 @_Z16get_image_height14ocl_image2d_ro(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %img)
   %3 = tail call spir_func <2 x i32> @_Z13get_image_dim14ocl_image2d_ro(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %img)
@@ -54,7 +54,7 @@ define spir_kernel void @test_image2d(i32 addrspace(1)* nocapture %sizes, target
   %18 = add nsw i32 %16, %17
   %19 = extractelement <2 x i32> %8, i32 1
   %20 = add nsw i32 %18, %19
-  store i32 %20, i32 addrspace(1)* %sizes, align 4
+  store i32 %20, ptr addrspace(1) %sizes, align 4
   ret void
 }
 
@@ -72,7 +72,7 @@ declare spir_func i64 @_Z20get_image_array_size20ocl_image2d_array_ro(target("sp
 
 declare spir_func <2 x i32> @_Z13get_image_dim20ocl_image2d_array_ro(target("spirv.Image", void, 1, 0, 1, 0, 0, 0, 0))
 
-define spir_kernel void @test_image3d(i32 addrspace(1)* nocapture %sizes, target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0) %img) {
+define spir_kernel void @test_image3d(ptr addrspace(1) nocapture %sizes, target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0) %img) {
   %1 = tail call spir_func i32 @_Z15get_image_width14ocl_image3d_ro(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0) %img)
   %2 = tail call spir_func i32 @_Z16get_image_height14ocl_image3d_ro(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0) %img)
   %3 = tail call spir_func i32 @_Z15get_image_depth14ocl_image3d_ro(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0) %img)
@@ -87,7 +87,7 @@ define spir_kernel void @test_image3d(i32 addrspace(1)* nocapture %sizes, target
   %12 = add nsw i32 %10, %11
   %13 = extractelement <4 x i32> %4, i32 3
   %14 = add nsw i32 %12, %13
-  store i32 %14, i32 addrspace(1)* %sizes, align 4
+  store i32 %14, ptr addrspace(1) %sizes, align 4
   ret void
 }
 
@@ -99,14 +99,14 @@ declare spir_func i32 @_Z15get_image_depth14ocl_image3d_ro(target("spirv.Image",
 
 declare spir_func <4 x i32> @_Z13get_image_dim14ocl_image3d_ro(target("spirv.Image", void, 2, 0, 0, 0, 0, 0, 0))
 
-define spir_kernel void @test_image2d_array_depth_t(i32 addrspace(1)* nocapture %sizes, target("spirv.Image", void, 1, 1, 1, 0, 0, 0, 0) %array) {
+define spir_kernel void @test_image2d_array_depth_t(ptr addrspace(1) nocapture %sizes, target("spirv.Image", void, 1, 1, 1, 0, 0, 0, 0) %array) {
   %1 = tail call spir_func i32 @_Z15get_image_width26ocl_image2d_array_depth_ro(target("spirv.Image", void, 1, 1, 1, 0, 0, 0, 0) %array)
   %2 = tail call spir_func i32 @_Z16get_image_height26ocl_image2d_array_depth_ro(target("spirv.Image", void, 1, 1, 1, 0, 0, 0, 0) %array)
   %3 = tail call spir_func i64 @_Z20get_image_array_size26ocl_image2d_array_depth_ro(target("spirv.Image", void, 1, 1, 1, 0, 0, 0, 0) %array)
   %4 = trunc i64 %3 to i32
   %5 = add nsw i32 %2, %1
   %6 = add nsw i32 %5, %4
-  store i32 %5, i32 addrspace(1)* %sizes, align 4
+  store i32 %5, ptr addrspace(1) %sizes, align 4
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpImageReadMS.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpImageReadMS.ll
index db934c2f3636f..0e822bd648c9a 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpImageReadMS.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpImageReadMS.ll
@@ -11,7 +11,7 @@
 
 ; CHECK-SPIRV: %[[#]] = OpImageRead %[[#]] %[[#]] %[[#]] Sample %[[#]]
 
-define spir_kernel void @sample_test(target("spirv.Image", void, 1, 0, 0, 1, 0, 0, 0) %source, i32 %sampler, <4 x float> addrspace(1)* nocapture %results) {
+define spir_kernel void @sample_test(target("spirv.Image", void, 1, 0, 0, 1, 0, 0, 0) %source, i32 %sampler, ptr addrspace(1) nocapture %results) {
 entry:
   %call = tail call spir_func i32 @_Z13get_global_idj(i32 0)
   %call1 = tail call spir_func i32 @_Z13get_global_idj(i32 1)
@@ -33,8 +33,8 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %tmp19 = mul i32 %tmp, %call2
   %add7 = add i32 %tmp19, %call
   %call9 = tail call spir_func <4 x float> @_Z11read_imagef19ocl_image2d_msaa_roDv2_ii(target("spirv.Image", void, 1, 0, 0, 1, 0, 0, 0) %source, <2 x i32> %vecinit8, i32 %sample.021)
-  %arrayidx = getelementptr inbounds <4 x float>, <4 x float> addrspace(1)* %results, i32 %add7
-  store <4 x float> %call9, <4 x float> addrspace(1)* %arrayidx, align 16
+  %arrayidx = getelementptr inbounds <4 x float>, ptr addrspace(1) %results, i32 %add7
+  store <4 x float> %call9, ptr addrspace(1) %arrayidx, align 16
   %inc = add nuw i32 %sample.021, 1
   %cmp = icmp ult i32 %inc, %call4
   br i1 %cmp, label %for.body, label %for.end.loopexit
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpImageSampleExplicitLod.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpImageSampleExplicitLod.ll
index c4e810c6f3a59..a9d3a3bf4f475 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpImageSampleExplicitLod.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpImageSampleExplicitLod.ll
@@ -5,7 +5,7 @@
 ; CHECK-SPIRV-DAG: %[[#RetType]] = OpTypeVector %[[#]] 4
 ; CHECK-SPIRV:     %[[#]] = OpCompositeExtract %[[#]] %[[#RetID]] 0
 
-define spir_kernel void @sample_kernel(target("spirv.Image", void, 1, 1, 0, 0, 0, 0, 0) %input, i32 %imageSampler, float addrspace(1)* %xOffsets, float addrspace(1)* %yOffsets, float addrspace(1)* %results) {
+define spir_kernel void @sample_kernel(target("spirv.Image", void, 1, 1, 0, 0, 0, 0, 0) %input, i32 %imageSampler, ptr addrspace(1) %xOffsets, ptr addrspace(1) %yOffsets, ptr addrspace(1) %results) {
 entry:
   %call = call spir_func i32 @_Z13get_global_idj(i32 0)
   %call1 = call spir_func i32 @_Z13get_global_idj(i32 1)
@@ -13,17 +13,17 @@ entry:
   %call2.old = extractelement <2 x i32> %call2.tmp1, i32 0
   %mul = mul i32 %call1, %call2.old
   %add = add i32 %mul, %call
-  %arrayidx = getelementptr inbounds float, float addrspace(1)* %xOffsets, i32 %add
-  %0 = load float, float addrspace(1)* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds float, ptr addrspace(1) %xOffsets, i32 %add
+  %0 = load float, ptr addrspace(1) %arrayidx, align 4
   %conv = fptosi float %0 to i32
   %vecinit = insertelement <2 x i32> undef, i32 %conv, i32 0
-  %arrayidx3 = getelementptr inbounds float, float addrspace(1)* %yOffsets, i32 %add
-  %1 = load float, float addrspace(1)* %arrayidx3, align 4
+  %arrayidx3 = getelementptr inbounds float, ptr addrspace(1) %yOffsets, i32 %add
+  %1 = load float, ptr addrspace(1) %arrayidx3, align 4
   %conv4 = fptosi float %1 to i32
   %vecinit5 = insertelement <2 x i32> %vecinit, i32 %conv4, i32 1
   %call6.tmp.tmp = call spir_func float @_Z11read_imagef20ocl_image2d_depth_ro11ocl_samplerDv2_i(target("spirv.Image", void, 1, 1, 0, 0, 0, 0, 0) %input, i32 %imageSampler, <2 x i32> %vecinit5)
-  %arrayidx7 = getelementptr inbounds float, float addrspace(1)* %results, i32 %add
-  store float %call6.tmp.tmp, float addrspace(1)* %arrayidx7, align 4
+  %arrayidx7 = getelementptr inbounds float, ptr addrspace(1) %results, i32 %add
+  store float %call6.tmp.tmp, ptr addrspace(1) %arrayidx7, align 4
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpPhi_ArgumentsPlaceholders.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpPhi_ArgumentsPlaceholders.ll
index 63b517c805494..e8b812ad45ad1 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpPhi_ArgumentsPlaceholders.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpPhi_ArgumentsPlaceholders.ll
@@ -15,15 +15,15 @@
 ; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
 ; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
 
-%struct.Node = type { %struct.Node.0 addrspace(1)* }
+%struct.Node = type { ptr addrspace(1) }
 %struct.Node.0 = type opaque
 
-define spir_kernel void @verify_linked_lists(%struct.Node addrspace(1)* %pNodes) {
+define spir_kernel void @verify_linked_lists(ptr addrspace(1) %pNodes) {
 entry:
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %pNode.0 = phi %struct.Node addrspace(1)* [ %pNodes, %entry ], [ %1, %for.inc ]
+  %pNode.0 = phi ptr addrspace(1) [ %pNodes, %entry ], [ %1, %for.inc ]
   %j.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
 ; CHECK:      %[[#]] = OpPhi %[[#]] %[[#]] %[[#]] %[[#BitcastResultId:]] %[[#]]
 ; CHECK-NEXT: OpPhi
@@ -32,10 +32,10 @@ for.cond:                                         ; preds = %for.inc, %entry
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %pNext = getelementptr inbounds %struct.Node, %struct.Node addrspace(1)* %pNode.0, i32 0, i32 0
+  %pNext = getelementptr inbounds %struct.Node, ptr addrspace(1) %pNode.0, i32 0, i32 0
 
-  %0 = load %struct.Node.0 addrspace(1)*, %struct.Node.0 addrspace(1)* addrspace(1)* %pNext, align 4
-  %1 = bitcast %struct.Node.0 addrspace(1)* %0 to %struct.Node addrspace(1)*
+  %0 = load ptr addrspace(1), ptr addrspace(1) %pNext, align 4
+  %1 = bitcast ptr addrspace(1) %0 to ptr addrspace(1)
 ; CHECK: %[[#LoadResultId:]] = OpLoad %[[#]]
 ; CHECK: %[[#BitcastResultId]] = OpBitcast %[[#]] %[[#LoadResultId]]
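
[Reviewer note, not part of the patch: under opaque pointers the bitcast
in for.body degenerates to a self-cast:

    ; before: a genuine type change between distinct pointee types
    %1 = bitcast %struct.Node.0 addrspace(1)* %0 to %struct.Node addrspace(1)*
    ; after: source and destination type are both ptr addrspace(1)
    %1 = bitcast ptr addrspace(1) %0 to ptr addrspace(1)

The CHECK lines above still expect %[[#BitcastResultId]] from an
OpBitcast, so the test presumably passes only because the backend still
materializes this no-op cast; worth confirming nothing folds it away
before instruction selection.]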
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpVectorExtractDynamic.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpVectorExtractDynamic.ll
index ef92c4418d21d..2b67c303b33e2 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpVectorExtractDynamic.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpVectorExtractDynamic.ll
@@ -11,9 +11,9 @@
 
 ; CHECK-SPIRV: %[[#res:]] = OpVectorExtractDynamic %[[#float]] %[[#vec]] %[[#index]]
 
-define spir_kernel void @test(float addrspace(1)* nocapture %out, <2 x float> %vec, i32 %index) {
+define spir_kernel void @test(ptr addrspace(1) nocapture %out, <2 x float> %vec, i32 %index) {
 entry:
   %res = extractelement <2 x float> %vec, i32 %index
-  store float %res, float addrspace(1)* %out, align 4
+  store float %res, ptr addrspace(1) %out, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpVectorInsertDynamic_i16.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpVectorInsertDynamic_i16.ll
index 76f990ed6806a..5423690fc9dfb 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpVectorInsertDynamic_i16.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpVectorInsertDynamic_i16.ll
@@ -16,11 +16,11 @@
 ; CHECK:     %[[#vec2:]] = OpCompositeInsert %[[#int16_2]] %[[#const2]] %[[#vec1]] 1
 ; CHECK:     %[[#res:]] = OpVectorInsertDynamic %[[#int16_2]] %[[#vec2]] %[[#v:]] %[[#index:]]
 
-define spir_kernel void @test(<2 x i16>* nocapture %out, i16 %v, i32 %index) {
+define spir_kernel void @test(ptr nocapture %out, i16 %v, i32 %index) {
 entry:
   %vec1 = insertelement <2 x i16> undef, i16 4, i32 0
   %vec2 = insertelement <2 x i16> %vec1, i16 8, i32 1
   %res = insertelement <2 x i16> %vec2, i16 %v, i32 %index
-  store <2 x i16> %res, <2 x i16>* %out, align 4
+  store <2 x i16> %res, ptr %out, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll
index 5e1752fadc61e..c121085cd6ade 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_cmpxchg.ll
@@ -37,16 +37,16 @@
 ; CHECK-SPIRV:     %[[#]] = OpAtomicCompareExchange %[[#UINT]] %[[#PTR]] %[[#WORKGROUP_SCOPE]] %[[#RELAXED]] %[[#RELAXED]] %[[#VAL]] %[[#CMP]]
 ; CHECK-SPIRV:     %[[#]] = OpAtomicCompareExchange %[[#UINT]] %[[#PTR]] %[[#WORKGROUP_SCOPE]] %[[#RELAXED]] %[[#RELAXED]] %[[#VAL]] %[[#CMP]]
 
-define dso_local spir_kernel void @test_atomic_cmpxchg(i32 addrspace(1)* noundef %p, i32 noundef %cmp, i32 noundef %val) local_unnamed_addr {
+define dso_local spir_kernel void @test_atomic_cmpxchg(ptr addrspace(1) noundef %p, i32 noundef %cmp, i32 noundef %val) local_unnamed_addr {
 entry:
-  %call = tail call spir_func i32 @_Z14atomic_cmpxchgPU3AS1Viii(i32 addrspace(1)* noundef %p, i32 noundef %cmp, i32 noundef %val)
-  %call1 = tail call spir_func i32 @_Z14atomic_cmpxchgPU3AS1Vjjj(i32 addrspace(1)* noundef %p, i32 noundef %cmp, i32 noundef %val)
+  %call = tail call spir_func i32 @_Z14atomic_cmpxchgPU3AS1Viii(ptr addrspace(1) noundef %p, i32 noundef %cmp, i32 noundef %val)
+  %call1 = tail call spir_func i32 @_Z14atomic_cmpxchgPU3AS1Vjjj(ptr addrspace(1) noundef %p, i32 noundef %cmp, i32 noundef %val)
   ret void
 }
 
-declare spir_func i32 @_Z14atomic_cmpxchgPU3AS1Viii(i32 addrspace(1)* noundef, i32 noundef, i32 noundef) local_unnamed_addr
+declare spir_func i32 @_Z14atomic_cmpxchgPU3AS1Viii(ptr addrspace(1) noundef, i32 noundef, i32 noundef) local_unnamed_addr
 
-declare spir_func i32 @_Z14atomic_cmpxchgPU3AS1Vjjj(i32 addrspace(1)* noundef, i32 noundef, i32 noundef) local_unnamed_addr
+declare spir_func i32 @_Z14atomic_cmpxchgPU3AS1Vjjj(ptr addrspace(1) noundef, i32 noundef, i32 noundef) local_unnamed_addr
 
 ;; References:
 ;; [1]: https://www.khronos.org/registry/OpenCL/sdk/2.0/docs/man/xhtml/atomic_cmpxchg.html
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll
index d12fbb67f6160..0e2b94800d1a0 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/atomic_legacy.ll
@@ -31,16 +31,16 @@
 ; CHECK-SPIRV:     %[[#]] = OpAtomicIAdd %[[#UINT]] %[[#PTR]] %[[#WORKGROUP_SCOPE]] %[[#RELAXED]] %[[#VAL]]
 ; CHECK-SPIRV:     %[[#]] = OpAtomicIAdd %[[#UINT]] %[[#PTR]] %[[#WORKGROUP_SCOPE]] %[[#RELAXED]] %[[#VAL]]
 
-define dso_local spir_kernel void @test_legacy_atomics(i32 addrspace(1)* noundef %p, i32 noundef %val) local_unnamed_addr {
+define dso_local spir_kernel void @test_legacy_atomics(ptr addrspace(1) noundef %p, i32 noundef %val) local_unnamed_addr {
 entry:
-  %call = tail call spir_func i32 @_Z8atom_addPU3AS1Vii(i32 addrspace(1)* noundef %p, i32 noundef %val)
-  %call1 = tail call spir_func i32 @_Z10atomic_addPU3AS1Vii(i32 addrspace(1)* noundef %p, i32 noundef %val)
+  %call = tail call spir_func i32 @_Z8atom_addPU3AS1Vii(ptr addrspace(1) noundef %p, i32 noundef %val)
+  %call1 = tail call spir_func i32 @_Z10atomic_addPU3AS1Vii(ptr addrspace(1) noundef %p, i32 noundef %val)
   ret void
 }
 
-declare spir_func i32 @_Z8atom_addPU3AS1Vii(i32 addrspace(1)* noundef, i32 noundef) local_unnamed_addr
+declare spir_func i32 @_Z8atom_addPU3AS1Vii(ptr addrspace(1) noundef, i32 noundef) local_unnamed_addr
 
-declare spir_func i32 @_Z10atomic_addPU3AS1Vii(i32 addrspace(1)* noundef, i32 noundef) local_unnamed_addr
+declare spir_func i32 @_Z10atomic_addPU3AS1Vii(ptr addrspace(1) noundef, i32 noundef) local_unnamed_addr
 
 ;; References:
 ;; [1]: https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-legacy
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll
index 5d9840d3bd5b9..5fb17deb0ab32 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpenCL/sub_group_mask.ll
@@ -9,10 +9,10 @@
 ;;   *out = get_sub_group_gt_mask();
 ;; }
 
-define dso_local spir_kernel void @test_mask(<4 x i32> addrspace(1)* nocapture noundef writeonly %out) local_unnamed_addr {
+define dso_local spir_kernel void @test_mask(ptr addrspace(1) nocapture noundef writeonly %out) local_unnamed_addr {
 entry:
   %call = tail call spir_func <4 x i32> @_Z21get_sub_group_gt_maskv()
-  store <4 x i32> %call, <4 x i32> addrspace(1)* %out, align 16
+  store <4 x i32> %call, ptr addrspace(1) %out, align 16
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/RelationalOperators.ll b/llvm/test/CodeGen/SPIRV/transcoding/RelationalOperators.ll
index 7877b4ea8ac22..3c37518bb52fd 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/RelationalOperators.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/RelationalOperators.ll
@@ -13,11 +13,11 @@
 ;;   res[0] = a > b;
 ;; }
 
-define dso_local spir_kernel void @testUGreaterThan(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testUGreaterThan(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = icmp ugt <2 x i32> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -31,11 +31,11 @@ entry:
 ;;   res[0] = a > b;
 ;; }
 
-define dso_local spir_kernel void @testSGreaterThan(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testSGreaterThan(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = icmp sgt <2 x i32> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -49,11 +49,11 @@ entry:
 ;;   res[0] = a >= b;
 ;; }
 
-define dso_local spir_kernel void @testUGreaterThanEqual(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testUGreaterThanEqual(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = icmp uge <2 x i32> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -67,11 +67,11 @@ entry:
 ;;   res[0] = a >= b;
 ;; }
 
-define dso_local spir_kernel void @testSGreaterThanEqual(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testSGreaterThanEqual(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = icmp sge <2 x i32> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -85,11 +85,11 @@ entry:
 ;;   res[0] = a < b;
 ;; }
 
-define dso_local spir_kernel void @testULessThan(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testULessThan(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = icmp ult <2 x i32> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -103,11 +103,11 @@ entry:
 ;;   res[0] = a < b;
 ;; }
 
-define dso_local spir_kernel void @testSLessThan(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testSLessThan(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = icmp slt <2 x i32> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -121,11 +121,11 @@ entry:
 ;;   res[0] = a <= b;
 ;; }
 
-define dso_local spir_kernel void @testULessThanEqual(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testULessThanEqual(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = icmp ule <2 x i32> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -139,11 +139,11 @@ entry:
 ;;   res[0] = a <= b;
 ;; }
 
-define dso_local spir_kernel void @testSLessThanEqual(<2 x i32> noundef %a, <2 x i32> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testSLessThanEqual(<2 x i32> noundef %a, <2 x i32> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = icmp sle <2 x i32> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -157,11 +157,11 @@ entry:
 ;;   res[0] = a == b;
 ;; }
 
-define dso_local spir_kernel void @testFOrdEqual(<2 x float> noundef %a, <2 x float> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testFOrdEqual(<2 x float> noundef %a, <2 x float> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = fcmp oeq <2 x float> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -175,11 +175,11 @@ entry:
 ;;   res[0] = a != b;
 ;; }
 
-define dso_local spir_kernel void @testFUnordNotEqual(<2 x float> noundef %a, <2 x float> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testFUnordNotEqual(<2 x float> noundef %a, <2 x float> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = fcmp une <2 x float> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -193,11 +193,11 @@ entry:
 ;;   res[0] = a > b;
 ;; }
 
-define dso_local spir_kernel void @testFOrdGreaterThan(<2 x float> noundef %a, <2 x float> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testFOrdGreaterThan(<2 x float> noundef %a, <2 x float> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = fcmp ogt <2 x float> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -211,11 +211,11 @@ entry:
 ;;   res[0] = a >= b;
 ;; }
 
-define dso_local spir_kernel void @testFOrdGreaterThanEqual(<2 x float> noundef %a, <2 x float> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testFOrdGreaterThanEqual(<2 x float> noundef %a, <2 x float> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = fcmp oge <2 x float> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -229,11 +229,11 @@ entry:
 ;;   res[0] = a < b;
 ;; }
 
-define dso_local spir_kernel void @testFOrdLessThan(<2 x float> noundef %a, <2 x float> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testFOrdLessThan(<2 x float> noundef %a, <2 x float> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = fcmp olt <2 x float> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -247,10 +247,10 @@ entry:
 ;;   res[0] = a <= b;
 ;; }
 
-define dso_local spir_kernel void @testFOrdLessThanEqual(<2 x float> noundef %a, <2 x float> noundef %b, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testFOrdLessThanEqual(<2 x float> noundef %a, <2 x float> noundef %b, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %cmp = fcmp ole <2 x float> %a, %b
   %sext = sext <2 x i1> %cmp to <2 x i32>
-  store <2 x i32> %sext, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %sext, ptr addrspace(1) %res, align 8
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/RelationalOperatorsFUnord.ll b/llvm/test/CodeGen/SPIRV/transcoding/RelationalOperatorsFUnord.ll
index 29164c20d95d2..10bf8f1bcb618 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/RelationalOperatorsFUnord.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/RelationalOperatorsFUnord.ll
@@ -13,7 +13,7 @@
 define spir_kernel void @testFUnordEqual(<2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fcmp ueq <2 x float> %a, %b
-  store <2 x i1> %0, <2 x i1> addrspace(1)* @var
+  store <2 x i1> %0, ptr addrspace(1) @var
   ret void
 }
 
@@ -26,7 +26,7 @@ entry:
 define spir_kernel void @testFUnordGreaterThan(<2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fcmp ugt <2 x float> %a, %b
-  store <2 x i1> %0, <2 x i1> addrspace(1)* @var
+  store <2 x i1> %0, ptr addrspace(1) @var
   ret void
 }
 
@@ -39,7 +39,7 @@ entry:
 define spir_kernel void @testFUnordGreaterThanEqual(<2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fcmp uge <2 x float> %a, %b
-  store <2 x i1> %0, <2 x i1> addrspace(1)* @var
+  store <2 x i1> %0, ptr addrspace(1) @var
   ret void
 }
 
@@ -52,7 +52,7 @@ entry:
 define spir_kernel void @testFUnordLessThan(<2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fcmp ult <2 x float> %a, %b
-  store <2 x i1> %0, <2 x i1> addrspace(1)* @var
+  store <2 x i1> %0, ptr addrspace(1) @var
   ret void
 }
 
@@ -65,6 +65,6 @@ entry:
 define spir_kernel void @testFUnordLessThanEqual(<2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fcmp ule <2 x float> %a, %b
-  store <2 x i1> %0, <2 x i1> addrspace(1)* @var
+  store <2 x i1> %0, ptr addrspace(1) @var
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/SampledImage.ll b/llvm/test/CodeGen/SPIRV/transcoding/SampledImage.ll
index 8a90e40e88817..23d1c125e2be0 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/SampledImage.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/SampledImage.ll
@@ -42,16 +42,16 @@
 ; CHECK-SPIRV: %[[#SampledImage3:]] = OpSampledImage %[[#SampledImageTy]] %[[#InputImage]] %[[#ConstSampler2]]
 ; CHECK-SPIRV: %[[#]] = OpImageSampleExplicitLod %[[#]] %[[#SampledImage3]]
 
-define dso_local spir_kernel void @sample_kernel_float(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %input, <2 x float> noundef %coords, <4 x float> addrspace(1)* nocapture noundef writeonly %results, target("spirv.Sampler") %argSampl) local_unnamed_addr {
+define dso_local spir_kernel void @sample_kernel_float(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %input, <2 x float> noundef %coords, ptr addrspace(1) nocapture noundef writeonly %results, target("spirv.Sampler") %argSampl) local_unnamed_addr {
 entry:
   %0 = tail call spir_func target("spirv.Sampler") @__translate_sampler_initializer(i32 32)
   %call = tail call spir_func <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_f(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %input, target("spirv.Sampler") %0, <2 x float> noundef %coords)
-  store <4 x float> %call, <4 x float> addrspace(1)* %results, align 16
+  store <4 x float> %call, ptr addrspace(1) %results, align 16
   %call1 = tail call spir_func <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_f(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %input, target("spirv.Sampler") %argSampl, <2 x float> noundef %coords)
-  store <4 x float> %call1, <4 x float> addrspace(1)* %results, align 16
+  store <4 x float> %call1, ptr addrspace(1) %results, align 16
   %1 = tail call spir_func target("spirv.Sampler") @__translate_sampler_initializer(i32 22)
   %call2 = tail call spir_func <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_f(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %input, target("spirv.Sampler") %1, <2 x float> noundef %coords)
-  store <4 x float> %call2, <4 x float> addrspace(1)* %results, align 16
+  store <4 x float> %call2, ptr addrspace(1) %results, align 16
   ret void
 }
 
@@ -72,16 +72,16 @@ declare spir_func target("spirv.Sampler") @__translate_sampler_initializer(i32)
 ; CHECK-SPIRV: %[[#SampledImage6:]] = OpSampledImage %[[#SampledImageTy]] %[[#InputImage]] %[[#ConstSampler2]]
 ; CHECK-SPIRV: %[[#]] = OpImageSampleExplicitLod %[[#]] %[[#SampledImage6]]
 
-define dso_local spir_kernel void @sample_kernel_int(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %input, <2 x float> noundef %coords, <4 x i32> addrspace(1)* nocapture noundef writeonly %results, target("spirv.Sampler") %argSampl) local_unnamed_addr {
+define dso_local spir_kernel void @sample_kernel_int(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %input, <2 x float> noundef %coords, ptr addrspace(1) nocapture noundef writeonly %results, target("spirv.Sampler") %argSampl) local_unnamed_addr {
 entry:
   %0 = tail call spir_func target("spirv.Sampler") @__translate_sampler_initializer(i32 32)
   %call = tail call spir_func <4 x i32> @_Z11read_imagei14ocl_image2d_ro11ocl_samplerDv2_f(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %input, target("spirv.Sampler") %0, <2 x float> noundef %coords)
-  store <4 x i32> %call, <4 x i32> addrspace(1)* %results, align 16
+  store <4 x i32> %call, ptr addrspace(1) %results, align 16
   %call1 = tail call spir_func <4 x i32> @_Z11read_imagei14ocl_image2d_ro11ocl_samplerDv2_f(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %input, target("spirv.Sampler") %argSampl, <2 x float> noundef %coords)
-  store <4 x i32> %call1, <4 x i32> addrspace(1)* %results, align 16
+  store <4 x i32> %call1, ptr addrspace(1) %results, align 16
   %1 = tail call spir_func target("spirv.Sampler") @__translate_sampler_initializer(i32 22)
   %call2 = tail call spir_func <4 x i32> @_Z11read_imagei14ocl_image2d_ro11ocl_samplerDv2_f(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %input, target("spirv.Sampler") %1, <2 x float> noundef %coords)
-  store <4 x i32> %call2, <4 x i32> addrspace(1)* %results, align 16
+  store <4 x i32> %call2, ptr addrspace(1) %results, align 16
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/SpecConstantComposite.ll b/llvm/test/CodeGen/SPIRV/transcoding/SpecConstantComposite.ll
index e6f5c7ddb078d..a17cb2eea0542 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/SpecConstantComposite.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/SpecConstantComposite.ll
@@ -24,15 +24,15 @@
 
 $_ZTS4Test = comdat any
 
-define weak_odr dso_local spir_kernel void @_ZTS4Test(%struct._ZTS3POD.POD addrspace(1)* %_arg_, %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range"* byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_1, %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range"* byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_2, %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id"* byval(%"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id") align 8 %_arg_3) local_unnamed_addr comdat {
+define weak_odr dso_local spir_kernel void @_ZTS4Test(ptr addrspace(1) %_arg_, ptr byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_1, ptr byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_2, ptr byval(%"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id") align 8 %_arg_3) local_unnamed_addr comdat {
 entry:
   %ref.tmp.i = alloca %struct._ZTS3POD.POD, align 8
-  %0 = getelementptr inbounds %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id", %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id"* %_arg_3, i64 0, i32 0, i32 0, i64 0
-  %1 = load i64, i64* %0, align 8
-  %add.ptr.i = getelementptr inbounds %struct._ZTS3POD.POD, %struct._ZTS3POD.POD addrspace(1)* %_arg_, i64 %1
-  %2 = bitcast %struct._ZTS3POD.POD* %ref.tmp.i to i8*
-  call void @llvm.lifetime.start.p0i8(i64 24, i8* nonnull %2)
-  %3 = addrspacecast %struct._ZTS3POD.POD* %ref.tmp.i to %struct._ZTS3POD.POD addrspace(4)*
+  %0 = getelementptr inbounds %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id", ptr %_arg_3, i64 0, i32 0, i32 0, i64 0
+  %1 = load i64, ptr %0, align 8
+  %add.ptr.i = getelementptr inbounds %struct._ZTS3POD.POD, ptr addrspace(1) %_arg_, i64 %1
+  %2 = bitcast ptr %ref.tmp.i to ptr
+  call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %2)
+  %3 = addrspacecast ptr %ref.tmp.i to ptr addrspace(4)
 
   %4 = call i32 @_Z20__spirv_SpecConstantii(i32 3, i32 1)
 ; CHECK-SPIRV-DAG: %[[#SC3]] = OpSpecConstant %[[#Int]] 1
@@ -70,21 +70,21 @@ entry:
   %15 = call %struct._ZTS3POD.POD @"_Z29__spirv_SpecConstantCompositeAstruct._ZTS1A.Aclass._ZTSN2cl4sycl3vecIiLi2EEE.cl::sycl::vec"([2 x %struct._ZTS1A.A] %10, %"class._ZTSN2cl4sycl3vecIiLi2EEE.cl::sycl::vec" %14)
 ; CHECK-SPIRV-DAG: %[[#SC_POD:]] = OpSpecConstantComposite %[[#POD_TYPE]] %[[#SC_Array]] %[[#SC_Struct]]
 
-  store %struct._ZTS3POD.POD %15, %struct._ZTS3POD.POD addrspace(4)* %3, align 8
+  store %struct._ZTS3POD.POD %15, ptr addrspace(4) %3, align 8
 ; CHECK-SPIRV-DAG: OpStore %[[#]] %[[#SC_POD]]
 
-  %16 = bitcast %struct._ZTS3POD.POD addrspace(1)* %add.ptr.i to i8 addrspace(1)*
-  %17 = addrspacecast i8 addrspace(1)* %16 to i8 addrspace(4)*
-  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 8 dereferenceable(24) %17, i8* nonnull align 8 dereferenceable(24) %2, i64 24, i1 false)
-  call void @llvm.lifetime.end.p0i8(i64 24, i8* nonnull %2)
+  %16 = bitcast ptr addrspace(1) %add.ptr.i to ptr addrspace(1)
+  %17 = addrspacecast ptr addrspace(1) %16 to ptr addrspace(4)
+  call void @llvm.memcpy.p4.p0.i64(ptr addrspace(4) align 8 dereferenceable(24) %17, ptr nonnull align 8 dereferenceable(24) %2, i64 24, i1 false)
+  call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %2)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
-declare void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memcpy.p4.p0.i64(ptr addrspace(4) noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
 
 declare i32 @_Z20__spirv_SpecConstantii(i32, i32)
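
[Reviewer note, not part of the patch: besides the pointer types
themselves, the overloaded intrinsic names change here, because pointee
types used to participate in the name mangling; with opaque pointers
only the address spaces remain:

    ; before
    declare void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
    ; after
    declare void @llvm.memcpy.p4.p0.i64(ptr addrspace(4) noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)

The same renaming applies to llvm.lifetime.start/end (.p0i8 -> .p0)
throughout the patch.]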
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/TransFNeg.ll b/llvm/test/CodeGen/SPIRV/transcoding/TransFNeg.ll
index eb52a775d38ee..5738049e1a871 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/TransFNeg.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/TransFNeg.ll
@@ -15,36 +15,36 @@
 ;;    *d = -*d;
 ;; }
 
-define dso_local spir_kernel void @foo(double noundef %a1, half addrspace(1)* noundef %h, float addrspace(1)* noundef %b0, double addrspace(1)* noundef %b1, <8 x double> addrspace(1)* noundef %d) {
+define dso_local spir_kernel void @foo(double noundef %a1, ptr addrspace(1) noundef %h, ptr addrspace(1) noundef %b0, ptr addrspace(1) noundef %b1, ptr addrspace(1) noundef %d) {
 entry:
   %a1.addr = alloca double, align 8
-  %h.addr = alloca half addrspace(1)*, align 4
-  %b0.addr = alloca float addrspace(1)*, align 4
-  %b1.addr = alloca double addrspace(1)*, align 4
-  %d.addr = alloca <8 x double> addrspace(1)*, align 4
-  store double %a1, double* %a1.addr, align 8
-  store half addrspace(1)* %h, half addrspace(1)** %h.addr, align 4
-  store float addrspace(1)* %b0, float addrspace(1)** %b0.addr, align 4
-  store double addrspace(1)* %b1, double addrspace(1)** %b1.addr, align 4
-  store <8 x double> addrspace(1)* %d, <8 x double> addrspace(1)** %d.addr, align 4
-  %0 = load half addrspace(1)*, half addrspace(1)** %h.addr, align 4
-  %1 = load half, half addrspace(1)* %0, align 2
+  %h.addr = alloca ptr addrspace(1), align 4
+  %b0.addr = alloca ptr addrspace(1), align 4
+  %b1.addr = alloca ptr addrspace(1), align 4
+  %d.addr = alloca ptr addrspace(1), align 4
+  store double %a1, ptr %a1.addr, align 8
+  store ptr addrspace(1) %h, ptr %h.addr, align 4
+  store ptr addrspace(1) %b0, ptr %b0.addr, align 4
+  store ptr addrspace(1) %b1, ptr %b1.addr, align 4
+  store ptr addrspace(1) %d, ptr %d.addr, align 4
+  %0 = load ptr addrspace(1), ptr %h.addr, align 4
+  %1 = load half, ptr addrspace(1) %0, align 2
   %fneg = fneg half %1
-  %2 = load half addrspace(1)*, half addrspace(1)** %h.addr, align 4
-  store half %fneg, half addrspace(1)* %2, align 2
-  %3 = load float addrspace(1)*, float addrspace(1)** %b0.addr, align 4
-  %4 = load float, float addrspace(1)* %3, align 4
+  %2 = load ptr addrspace(1), ptr %h.addr, align 4
+  store half %fneg, ptr addrspace(1) %2, align 2
+  %3 = load ptr addrspace(1), ptr %b0.addr, align 4
+  %4 = load float, ptr addrspace(1) %3, align 4
   %fneg1 = fneg float %4
-  %5 = load float addrspace(1)*, float addrspace(1)** %b0.addr, align 4
-  store float %fneg1, float addrspace(1)* %5, align 4
-  %6 = load double, double* %a1.addr, align 8
+  %5 = load ptr addrspace(1), ptr %b0.addr, align 4
+  store float %fneg1, ptr addrspace(1) %5, align 4
+  %6 = load double, ptr %a1.addr, align 8
   %fneg2 = fneg double %6
-  %7 = load double addrspace(1)*, double addrspace(1)** %b1.addr, align 4
-  store double %fneg2, double addrspace(1)* %7, align 8
-  %8 = load <8 x double> addrspace(1)*, <8 x double> addrspace(1)** %d.addr, align 4
-  %9 = load <8 x double>, <8 x double> addrspace(1)* %8, align 64
+  %7 = load ptr addrspace(1), ptr %b1.addr, align 4
+  store double %fneg2, ptr addrspace(1) %7, align 8
+  %8 = load ptr addrspace(1), ptr %d.addr, align 4
+  %9 = load <8 x double>, ptr addrspace(1) %8, align 64
   %fneg3 = fneg <8 x double> %9
-  %10 = load <8 x double> addrspace(1)*, <8 x double> addrspace(1)** %d.addr, align 4
-  store <8 x double> %fneg3, <8 x double> addrspace(1)* %10, align 64
+  %10 = load ptr addrspace(1), ptr %d.addr, align 4
+  store <8 x double> %fneg3, ptr addrspace(1) %10, align 64
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll b/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll
index 17a915e33c973..1d366d69a48a2 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/atomic_load_store.ll
@@ -12,9 +12,9 @@
 ; CHECK-SPIRV:        OpReturnValue %[[#ret]]
 ; CHECK-SPIRV-LABEL:  OpFunctionEnd
 
-define spir_func i32 @test_load(i32 addrspace(4)* %object) {
+define spir_func i32 @test_load(ptr addrspace(4) %object) {
 entry:
-  %0 = call spir_func i32 @_Z11atomic_loadPVU3AS4U7_Atomici(i32 addrspace(4)* %object)
+  %0 = call spir_func i32 @_Z11atomic_loadPVU3AS4U7_Atomici(ptr addrspace(4) %object)
   ret i32 %0
 }
 
@@ -24,14 +24,14 @@ entry:
 ; CHECK-SPIRV:        OpAtomicStore %[[#object]] %[[#]] %[[#]] %[[#desired]]
 ; CHECK-SPIRV-LABEL:  OpFunctionEnd
 
-define spir_func void @test_store(i32 addrspace(4)* %object, i32 %desired) {
+define spir_func void @test_store(ptr addrspace(4) %object, i32 %desired) {
 entry:
-  call spir_func void @_Z12atomic_storePVU3AS4U7_Atomicii(i32 addrspace(4)* %object, i32 %desired)
+  call spir_func void @_Z12atomic_storePVU3AS4U7_Atomicii(ptr addrspace(4) %object, i32 %desired)
   ret void
 }
 
-declare spir_func i32 @_Z11atomic_loadPVU3AS4U7_Atomici(i32 addrspace(4)*)
-declare spir_func void @_Z12atomic_storePVU3AS4U7_Atomicii(i32 addrspace(4)*, i32)
+declare spir_func i32 @_Z11atomic_loadPVU3AS4U7_Atomici(ptr addrspace(4))
+declare spir_func void @_Z12atomic_storePVU3AS4U7_Atomicii(ptr addrspace(4), i32)
 
 ; The goal of @test_typesX() cases is to ensure that a correct pointer type
 ; is deduced from the Value argument of OpAtomicLoad/OpAtomicStore. There is
@@ -53,14 +53,14 @@ entry:
 
 define spir_func void @test_types3(i64 noundef %arg, float %val) {
 entry:
-  %ptr1 = inttoptr i64 %arg to float addrspace(1)*
+  %ptr1 = inttoptr i64 %arg to ptr addrspace(1)
   %r = call spir_func float @atomic_load(ptr addrspace(1) %ptr1)
   ret void
 }
 
 define spir_func void @test_types4(i64 noundef %arg, float %val) {
 entry:
-  %ptr2 = inttoptr i64 %arg to float addrspace(1)*
+  %ptr2 = inttoptr i64 %arg to ptr addrspace(1)
   call spir_func void @atomic_store(ptr addrspace(1) %ptr2, float %val)
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll b/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll
index 2c0fc393b135a..be6d7cd29548c 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/bitcast.ll
@@ -5,16 +5,16 @@
 
 ; CHECK: Bitcast
 
-define spir_kernel void @test_fn(<2 x i8> addrspace(1)* nocapture readonly %src, i16 addrspace(1)* nocapture %dst) {
+define spir_kernel void @test_fn(ptr addrspace(1) nocapture readonly %src, ptr addrspace(1) nocapture %dst) {
 entry:
   %call = tail call spir_func i64 @_Z13get_global_idj(i32 0)
   %sext = shl i64 %call, 32
   %idxprom = ashr exact i64 %sext, 32
-  %arrayidx = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %src, i64 %idxprom
-  %0 = load <2 x i8>, <2 x i8> addrspace(1)* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds <2 x i8>, ptr addrspace(1) %src, i64 %idxprom
+  %0 = load <2 x i8>, ptr addrspace(1) %arrayidx, align 2
   %astype = bitcast <2 x i8> %0 to i16
-  %arrayidx2 = getelementptr inbounds i16, i16 addrspace(1)* %dst, i64 %idxprom
-  store i16 %astype, i16 addrspace(1)* %arrayidx2, align 2
+  %arrayidx2 = getelementptr inbounds i16, ptr addrspace(1) %dst, i64 %idxprom
+  store i16 %astype, ptr addrspace(1) %arrayidx2, align 2
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll b/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll
index 2249cbe4e98a5..d806013885bca 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/block_w_struct_return.ll
@@ -44,68 +44,68 @@
 ; CHECK-SPIRV: %[[#BlockLit:]] = OpPtrCastToGeneric %[[#Int8Ptr]] %[[#]]
 ; CHECK-SPIRV: %[[#]] = OpFunctionCall %[[#]] %[[#BlockInv]] %[[#StructRet]] %[[#BlockLit]] %[[#StructArg]]
 
-%struct.__opencl_block_literal_generic = type { i32, i32, i8 addrspace(4)* }
+%struct.__opencl_block_literal_generic = type { i32, i32, ptr addrspace(4) }
 %struct.A = type { i32 }
 
- at __block_literal_global = internal addrspace(1) constant { i32, i32, i8 addrspace(4)* } { i32 12, i32 4, i8 addrspace(4)* addrspacecast (i8* bitcast (void (%struct.A*, i8 addrspace(4)*, %struct.A*)* @__block_ret_struct_block_invoke to i8*) to i8 addrspace(4)*) }, align 4
+ at __block_literal_global = internal addrspace(1) constant { i32, i32, ptr addrspace(4) } { i32 12, i32 4, ptr addrspace(4) addrspacecast (ptr @__block_ret_struct_block_invoke to ptr addrspace(4)) }, align 4
 
-define dso_local spir_kernel void @block_ret_struct(i32 addrspace(1)* noundef %res) {
+define dso_local spir_kernel void @block_ret_struct(ptr addrspace(1) noundef %res) {
 entry:
-  %res.addr = alloca i32 addrspace(1)*, align 4
-  %kernelBlock = alloca %struct.__opencl_block_literal_generic addrspace(4)*, align 4
+  %res.addr = alloca ptr addrspace(1), align 4
+  %kernelBlock = alloca ptr addrspace(4), align 4
   %tid = alloca i32, align 4
   %aa = alloca %struct.A, align 4
   %tmp = alloca %struct.A, align 4
-  store i32 addrspace(1)* %res, i32 addrspace(1)** %res.addr, align 4
-  %0 = bitcast %struct.__opencl_block_literal_generic addrspace(4)** %kernelBlock to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
-  store %struct.__opencl_block_literal_generic addrspace(4)* addrspacecast (%struct.__opencl_block_literal_generic addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global to %struct.__opencl_block_literal_generic addrspace(1)*) to %struct.__opencl_block_literal_generic addrspace(4)*), %struct.__opencl_block_literal_generic addrspace(4)** %kernelBlock, align 4
-  %1 = bitcast i32* %tid to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %1)
+  store ptr addrspace(1) %res, ptr %res.addr, align 4
+  %0 = bitcast ptr %kernelBlock to ptr
+  call void @llvm.lifetime.start.p0(i64 4, ptr %0)
+  store ptr addrspace(4) addrspacecast (ptr addrspace(1) @__block_literal_global to ptr addrspace(4)), ptr %kernelBlock, align 4
+  %1 = bitcast ptr %tid to ptr
+  call void @llvm.lifetime.start.p0(i64 4, ptr %1)
   %call = call spir_func i32 @_Z13get_global_idj(i32 noundef 0)
-  store i32 %call, i32* %tid, align 4
-  %2 = load i32 addrspace(1)*, i32 addrspace(1)** %res.addr, align 4
-  %3 = load i32, i32* %tid, align 4
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %2, i32 %3
-  store i32 -1, i32 addrspace(1)* %arrayidx, align 4
-  %4 = bitcast %struct.A* %aa to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* %4)
-  %a = getelementptr inbounds %struct.A, %struct.A* %aa, i32 0, i32 0
-  store i32 5, i32* %a, align 4
-  call spir_func void @__block_ret_struct_block_invoke(%struct.A* sret(%struct.A) align 4 %tmp, i8 addrspace(4)* noundef addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global to i8 addrspace(1)*) to i8 addrspace(4)*), %struct.A* noundef byval(%struct.A) align 4 %aa)
-  %a1 = getelementptr inbounds %struct.A, %struct.A* %tmp, i32 0, i32 0
-  %5 = load i32, i32* %a1, align 4
+  store i32 %call, ptr %tid, align 4
+  %2 = load ptr addrspace(1), ptr %res.addr, align 4
+  %3 = load i32, ptr %tid, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %2, i32 %3
+  store i32 -1, ptr addrspace(1) %arrayidx, align 4
+  %4 = bitcast ptr %aa to ptr
+  call void @llvm.lifetime.start.p0(i64 4, ptr %4)
+  %a = getelementptr inbounds %struct.A, ptr %aa, i32 0, i32 0
+  store i32 5, ptr %a, align 4
+  call spir_func void @__block_ret_struct_block_invoke(ptr sret(%struct.A) align 4 %tmp, ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @__block_literal_global to ptr addrspace(4)), ptr noundef byval(%struct.A) align 4 %aa)
+  %a1 = getelementptr inbounds %struct.A, ptr %tmp, i32 0, i32 0
+  %5 = load i32, ptr %a1, align 4
   %sub = sub nsw i32 %5, 6
-  %6 = load i32 addrspace(1)*, i32 addrspace(1)** %res.addr, align 4
-  %7 = load i32, i32* %tid, align 4
-  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %6, i32 %7
-  store i32 %sub, i32 addrspace(1)* %arrayidx2, align 4
-  %8 = bitcast %struct.A* %aa to i8*
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %8)
-  %9 = bitcast i32* %tid to i8*
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %9)
-  %10 = bitcast %struct.__opencl_block_literal_generic addrspace(4)** %kernelBlock to i8*
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* %10)
+  %6 = load ptr addrspace(1), ptr %res.addr, align 4
+  %7 = load i32, ptr %tid, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %6, i32 %7
+  store i32 %sub, ptr addrspace(1) %arrayidx2, align 4
+  %8 = bitcast ptr %aa to ptr
+  call void @llvm.lifetime.end.p0(i64 4, ptr %8)
+  %9 = bitcast ptr %tid to ptr
+  call void @llvm.lifetime.end.p0(i64 4, ptr %9)
+  %10 = bitcast ptr %kernelBlock to ptr
+  call void @llvm.lifetime.end.p0(i64 4, ptr %10)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-define internal spir_func void @__block_ret_struct_block_invoke(%struct.A* noalias sret(%struct.A) align 4 %agg.result, i8 addrspace(4)* noundef %.block_descriptor, %struct.A* noundef byval(%struct.A) align 4 %a) {
+define internal spir_func void @__block_ret_struct_block_invoke(ptr noalias sret(%struct.A) align 4 %agg.result, ptr addrspace(4) noundef %.block_descriptor, ptr noundef byval(%struct.A) align 4 %a) {
 entry:
-  %.block_descriptor.addr = alloca i8 addrspace(4)*, align 4
-  store i8 addrspace(4)* %.block_descriptor, i8 addrspace(4)** %.block_descriptor.addr, align 4
-  %block = bitcast i8 addrspace(4)* %.block_descriptor to <{ i32, i32, i8 addrspace(4)* }> addrspace(4)*
-  %a1 = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 0
-  store i32 6, i32* %a1, align 4
-  %0 = bitcast %struct.A* %agg.result to i8*
-  %1 = bitcast %struct.A* %a to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 4, i1 false)
+  %.block_descriptor.addr = alloca ptr addrspace(4), align 4
+  store ptr addrspace(4) %.block_descriptor, ptr %.block_descriptor.addr, align 4
+  %block = bitcast ptr addrspace(4) %.block_descriptor to ptr addrspace(4)
+  %a1 = getelementptr inbounds %struct.A, ptr %a, i32 0, i32 0
+  store i32 6, ptr %a1, align 4
+  %0 = bitcast ptr %agg.result to ptr
+  %1 = bitcast ptr %a to ptr
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %0, ptr align 4 %1, i32 4, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i32, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)
 
 declare spir_func i32 @_Z13get_global_idj(i32 noundef)
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
index 44d2f5e24f59d..61bc28e2495dc 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars.ll
@@ -9,11 +9,11 @@
 
 @__spirv_BuiltInGlobalLinearId = external addrspace(1) global i32
 
-define spir_kernel void @f(i32 addrspace(1)* nocapture %order) {
+define spir_kernel void @f(ptr addrspace(1) nocapture %order) {
 entry:
-  %0 = load i32, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @__spirv_BuiltInGlobalLinearId to i32 addrspace(4)*), align 4
+  %0 = load i32, ptr addrspace(4) addrspacecast (ptr addrspace(1) @__spirv_BuiltInGlobalLinearId to ptr addrspace(4)), align 4
   ;; Need to store the result somewhere, otherwise the access to GlobalLinearId
   ;; may be removed.
-  store i32 %0, i32 addrspace(1)* %order, align 4
+  store i32 %0, ptr addrspace(1) %order, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll
index 53883fd1691f5..bccbcc7cd38f4 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_arithmetics.ll
@@ -46,19 +46,19 @@ $"_ZTSZZ4mainENK3$_0clERN2cl4sycl7handlerEE11dim2_subscr" = comdat any
 @__spirv_BuiltInGlobalSize = external dso_local local_unnamed_addr addrspace(1) constant <3 x i64>, align 32
 @__spirv_BuiltInGlobalOffset = external dso_local local_unnamed_addr addrspace(1) constant <3 x i64>, align 32
 
-define weak_odr dso_local spir_kernel void @"_ZTSZZ4mainENK3$_0clERN2cl4sycl7handlerEE11dim2_subscr"(i32 addrspace(1)* %_arg_, %"class._ZTSN2cl4sycl5rangeILi2EEE.cl::sycl::range"* byval(%"class._ZTSN2cl4sycl5rangeILi2EEE.cl::sycl::range") align 8 %_arg_1, %"class._ZTSN2cl4sycl5rangeILi2EEE.cl::sycl::range"* byval(%"class._ZTSN2cl4sycl5rangeILi2EEE.cl::sycl::range") align 8 %_arg_2, %"class._ZTSN2cl4sycl2idILi2EEE.cl::sycl::id"* byval(%"class._ZTSN2cl4sycl2idILi2EEE.cl::sycl::id") align 8 %_arg_3) local_unnamed_addr comdat {
+define weak_odr dso_local spir_kernel void @"_ZTSZZ4mainENK3$_0clERN2cl4sycl7handlerEE11dim2_subscr"(ptr addrspace(1) %_arg_, ptr byval(%"class._ZTSN2cl4sycl5rangeILi2EEE.cl::sycl::range") align 8 %_arg_1, ptr byval(%"class._ZTSN2cl4sycl5rangeILi2EEE.cl::sycl::range") align 8 %_arg_2, ptr byval(%"class._ZTSN2cl4sycl2idILi2EEE.cl::sycl::id") align 8 %_arg_3) local_unnamed_addr comdat {
 entry:
-  %agg.tmp4.sroa.0.sroa.2.0.agg.tmp4.sroa.0.0..sroa_cast.sroa_idx65 = getelementptr inbounds %"class._ZTSN2cl4sycl5rangeILi2EEE.cl::sycl::range", %"class._ZTSN2cl4sycl5rangeILi2EEE.cl::sycl::range"* %_arg_2, i64 0, i32 0, i32 0, i64 1
-  %agg.tmp4.sroa.0.sroa.2.0.copyload = load i64, i64* %agg.tmp4.sroa.0.sroa.2.0.agg.tmp4.sroa.0.0..sroa_cast.sroa_idx65, align 8
-  %agg.tmp5.sroa.0.sroa.0.0.agg.tmp5.sroa.0.0..sroa_cast.sroa_idx = getelementptr inbounds %"class._ZTSN2cl4sycl2idILi2EEE.cl::sycl::id", %"class._ZTSN2cl4sycl2idILi2EEE.cl::sycl::id"* %_arg_3, i64 0, i32 0, i32 0, i64 0
-  %agg.tmp5.sroa.0.sroa.0.0.copyload = load i64, i64* %agg.tmp5.sroa.0.sroa.0.0.agg.tmp5.sroa.0.0..sroa_cast.sroa_idx, align 8
-  %agg.tmp5.sroa.0.sroa.2.0.agg.tmp5.sroa.0.0..sroa_cast.sroa_idx69 = getelementptr inbounds %"class._ZTSN2cl4sycl2idILi2EEE.cl::sycl::id", %"class._ZTSN2cl4sycl2idILi2EEE.cl::sycl::id"* %_arg_3, i64 0, i32 0, i32 0, i64 1
-  %agg.tmp5.sroa.0.sroa.2.0.copyload = load i64, i64* %agg.tmp5.sroa.0.sroa.2.0.agg.tmp5.sroa.0.0..sroa_cast.sroa_idx69, align 8
-  %0 = load <3 x i64>, <3 x i64> addrspace(4)* addrspacecast (<3 x i64> addrspace(1)* @__spirv_BuiltInGlobalInvocationId to <3 x i64> addrspace(4)*), align 32
+  %agg.tmp4.sroa.0.sroa.2.0.agg.tmp4.sroa.0.0..sroa_cast.sroa_idx65 = getelementptr inbounds %"class._ZTSN2cl4sycl5rangeILi2EEE.cl::sycl::range", ptr %_arg_2, i64 0, i32 0, i32 0, i64 1
+  %agg.tmp4.sroa.0.sroa.2.0.copyload = load i64, ptr %agg.tmp4.sroa.0.sroa.2.0.agg.tmp4.sroa.0.0..sroa_cast.sroa_idx65, align 8
+  %agg.tmp5.sroa.0.sroa.0.0.agg.tmp5.sroa.0.0..sroa_cast.sroa_idx = getelementptr inbounds %"class._ZTSN2cl4sycl2idILi2EEE.cl::sycl::id", ptr %_arg_3, i64 0, i32 0, i32 0, i64 0
+  %agg.tmp5.sroa.0.sroa.0.0.copyload = load i64, ptr %agg.tmp5.sroa.0.sroa.0.0.agg.tmp5.sroa.0.0..sroa_cast.sroa_idx, align 8
+  %agg.tmp5.sroa.0.sroa.2.0.agg.tmp5.sroa.0.0..sroa_cast.sroa_idx69 = getelementptr inbounds %"class._ZTSN2cl4sycl2idILi2EEE.cl::sycl::id", ptr %_arg_3, i64 0, i32 0, i32 0, i64 1
+  %agg.tmp5.sroa.0.sroa.2.0.copyload = load i64, ptr %agg.tmp5.sroa.0.sroa.2.0.agg.tmp5.sroa.0.0..sroa_cast.sroa_idx69, align 8
+  %0 = load <3 x i64>, ptr addrspace(4) addrspacecast (ptr addrspace(1) @__spirv_BuiltInGlobalInvocationId to ptr addrspace(4)), align 32
   %1 = extractelement <3 x i64> %0, i64 1
   %2 = extractelement <3 x i64> %0, i64 0
-  %3 = load <3 x i64>, <3 x i64> addrspace(4)* addrspacecast (<3 x i64> addrspace(1)* @__spirv_BuiltInGlobalSize to <3 x i64> addrspace(4)*), align 32
-  %4 = load <3 x i64>, <3 x i64> addrspace(4)* addrspacecast (<3 x i64> addrspace(1)* @__spirv_BuiltInGlobalOffset to <3 x i64> addrspace(4)*), align 32
+  %3 = load <3 x i64>, ptr addrspace(4) addrspacecast (ptr addrspace(1) @__spirv_BuiltInGlobalSize to ptr addrspace(4)), align 32
+  %4 = load <3 x i64>, ptr addrspace(4) addrspacecast (ptr addrspace(1) @__spirv_BuiltInGlobalOffset to ptr addrspace(4)), align 32
   %5 = sub <3 x i64> %0, %4
   %6 = sub <3 x i64> %0, %4
   %7 = extractelement <3 x i64> %6, i64 0
@@ -70,11 +70,11 @@ entry:
   %mul.1.i.i.i.i = mul i64 %add6.i.i.i.i, %agg.tmp4.sroa.0.sroa.2.0.copyload
   %add.1.i.i.i.i = add i64 %2, %agg.tmp5.sroa.0.sroa.2.0.copyload
   %add6.1.i.i.i.i = add i64 %add.1.i.i.i.i, %mul.1.i.i.i.i
-  %ptridx.i.i.i = getelementptr inbounds i32, i32 addrspace(1)* %_arg_, i64 %add6.1.i.i.i.i
-  %ptridx.ascast.i.i.i = addrspacecast i32 addrspace(1)* %ptridx.i.i.i to i32 addrspace(4)*
-  %11 = load i32, i32 addrspace(4)* %ptridx.ascast.i.i.i, align 4
+  %ptridx.i.i.i = getelementptr inbounds i32, ptr addrspace(1) %_arg_, i64 %add6.1.i.i.i.i
+  %ptridx.ascast.i.i.i = addrspacecast ptr addrspace(1) %ptridx.i.i.i to ptr addrspace(4)
+  %11 = load i32, ptr addrspace(4) %ptridx.ascast.i.i.i, align 4
   %12 = trunc i64 %add.i.i.i to i32
   %conv5.i = add i32 %11, %12
-  store i32 %conv5.i, i32 addrspace(4)* %ptridx.ascast.i.i.i, align 4
+  store i32 %conv5.i, ptr addrspace(4) %ptridx.ascast.i.i.i, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll
index 3885f07023144..94da3f3450b78 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_opt.ll
@@ -43,20 +43,20 @@ $_ZTS10sycl_subgrIiLi0EE = comdat any
 @__spirv_BuiltInSubgroupMaxSize = external dso_local local_unnamed_addr addrspace(1) constant i32, align 4
 
 
-define weak_odr dso_local spir_kernel void @_ZTS10sycl_subgrIiLi0EE(i32 %_arg_, i32 addrspace(1)* %_arg_1, %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range"* byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_3, %"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range"* byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_4, %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id"* byval(%"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id") align 8 %_arg_5) local_unnamed_addr comdat {
+define weak_odr dso_local spir_kernel void @_ZTS10sycl_subgrIiLi0EE(i32 %_arg_, ptr addrspace(1) %_arg_1, ptr byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_3, ptr byval(%"class._ZTSN2cl4sycl5rangeILi1EEE.cl::sycl::range") align 8 %_arg_4, ptr byval(%"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id") align 8 %_arg_5) local_unnamed_addr comdat {
 entry:
-  %0 = getelementptr inbounds %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id", %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id"* %_arg_5, i64 0, i32 0, i32 0, i64 0
-  %1 = load i64, i64* %0, align 8
-  %add.ptr.i = getelementptr inbounds i32, i32 addrspace(1)* %_arg_1, i64 %1
+  %0 = getelementptr inbounds %"class._ZTSN2cl4sycl2idILi1EEE.cl::sycl::id", ptr %_arg_5, i64 0, i32 0, i32 0, i64 0
+  %1 = load i64, ptr %0, align 8
+  %add.ptr.i = getelementptr inbounds i32, ptr addrspace(1) %_arg_1, i64 %1
   %2 = and i32 %_arg_, 1
   %tobool.not.i = icmp eq i32 %2, 0
-  %3 = addrspacecast i32 addrspace(1)* @__spirv_BuiltInSubgroupMaxSize to i32 addrspace(4)*
+  %3 = addrspacecast ptr addrspace(1) @__spirv_BuiltInSubgroupMaxSize to ptr addrspace(4)
   br i1 %tobool.not.i, label %if.end.i, label %if.then.i
 
 if.then.i:                                        ; preds = %entry
-  %4 = load i32, i32 addrspace(4)* %3, align 4
-  %ptridx.ascast.i14.i = addrspacecast i32 addrspace(1)* %add.ptr.i to i32 addrspace(4)*
-  store i32 %4, i32 addrspace(4)* %ptridx.ascast.i14.i, align 4
+  %4 = load i32, ptr addrspace(4) %3, align 4
+  %ptridx.ascast.i14.i = addrspacecast ptr addrspace(1) %add.ptr.i to ptr addrspace(4)
+  store i32 %4, ptr addrspace(4) %ptridx.ascast.i14.i, align 4
   br label %if.end.i
 
 if.end.i:                                         ; preds = %if.then.i, %entry
@@ -65,13 +65,13 @@ if.end.i:                                         ; preds = %if.then.i, %entry
   br i1 %tobool4.not.i, label %cond.false.i, label %"_ZZZ4mainENK3$_0clERN2cl4sycl7handlerEENKUlNS1_7nd_itemILi1EEEE_clES5_.exit"
 
 cond.false.i:                                     ; preds = %if.end.i
-  %5 = load i32, i32 addrspace(4)* %3, align 4
+  %5 = load i32, ptr addrspace(4) %3, align 4
   br label %"_ZZZ4mainENK3$_0clERN2cl4sycl7handlerEENKUlNS1_7nd_itemILi1EEEE_clES5_.exit"
 
 "_ZZZ4mainENK3$_0clERN2cl4sycl7handlerEENKUlNS1_7nd_itemILi1EEEE_clES5_.exit": ; preds = %cond.false.i, %if.end.i
   %cond.i = phi i32 [ %5, %cond.false.i ], [ 1, %if.end.i ]
-  %ptridx.i.i = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr.i, i64 1
-  %ptridx.ascast.i.i = addrspacecast i32 addrspace(1)* %ptridx.i.i to i32 addrspace(4)*
-  store i32 %cond.i, i32 addrspace(4)* %ptridx.ascast.i.i, align 4
+  %ptridx.i.i = getelementptr inbounds i32, ptr addrspace(1) %add.ptr.i, i64 1
+  %ptridx.ascast.i.i = addrspacecast ptr addrspace(1) %ptridx.i.i to ptr addrspace(4)
+  store i32 %cond.i, ptr addrspace(4) %ptridx.ascast.i.i, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll b/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll
index 0cd75bb215ada..ff263d96f4819 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/clk_event_t.ll
@@ -20,15 +20,15 @@
 ;;   release_event(e1);
 ;; }
 
-define dso_local spir_kernel void @clk_event_t_test(i32 addrspace(1)* nocapture noundef writeonly %res, i8 addrspace(1)* noundef %prof) local_unnamed_addr {
+define dso_local spir_kernel void @clk_event_t_test(ptr addrspace(1) nocapture noundef writeonly %res, ptr addrspace(1) noundef %prof) local_unnamed_addr {
 entry:
   %call = call spir_func target("spirv.DeviceEvent") @_Z17create_user_eventv()
   %call1 = call spir_func zeroext i1 @_Z14is_valid_event12ocl_clkevent(target("spirv.DeviceEvent") %call)
   %conv = zext i1 %call1 to i32
-  store i32 %conv, i32 addrspace(1)* %res, align 4
+  store i32 %conv, ptr addrspace(1) %res, align 4
   call spir_func void @_Z12retain_event12ocl_clkevent(target("spirv.DeviceEvent") %call)
   call spir_func void @_Z21set_user_event_status12ocl_clkeventi(target("spirv.DeviceEvent") %call, i32 noundef -42)
-  call spir_func void @_Z28capture_event_profiling_info12ocl_clkeventiPU3AS1v(target("spirv.DeviceEvent") %call, i32 noundef 1, i8 addrspace(1)* noundef %prof)
+  call spir_func void @_Z28capture_event_profiling_info12ocl_clkeventiPU3AS1v(target("spirv.DeviceEvent") %call, i32 noundef 1, ptr addrspace(1) noundef %prof)
   call spir_func void @_Z13release_event12ocl_clkevent(target("spirv.DeviceEvent") %call)
   ret void
 }
@@ -41,6 +41,6 @@ declare spir_func void @_Z12retain_event12ocl_clkevent(target("spirv.DeviceEvent
 
 declare spir_func void @_Z21set_user_event_status12ocl_clkeventi(target("spirv.DeviceEvent"), i32 noundef) local_unnamed_addr
 
-declare spir_func void @_Z28capture_event_profiling_info12ocl_clkeventiPU3AS1v(target("spirv.DeviceEvent"), i32 noundef, i8 addrspace(1)* noundef) local_unnamed_addr
+declare spir_func void @_Z28capture_event_profiling_info12ocl_clkeventiPU3AS1v(target("spirv.DeviceEvent"), i32 noundef, ptr addrspace(1) noundef) local_unnamed_addr
 
 declare spir_func void @_Z13release_event12ocl_clkevent(target("spirv.DeviceEvent")) local_unnamed_addr
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll b/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll
index 84626fb3ab9bb..616fa6c5fbafb 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/enqueue_kernel.ll
@@ -121,269 +121,269 @@
 %opencl.queue_t = type opaque
 %struct.ndrange_t = type { i32 }
 %opencl.clk_event_t = type opaque
-%struct.__opencl_block_literal_generic = type { i32, i32, i8 addrspace(4)* }
+%struct.__opencl_block_literal_generic = type { i32, i32, ptr addrspace(4) }
 
-@__block_literal_global = internal addrspace(1) constant { i32, i32, i8 addrspace(4)* } { i32 12, i32 4, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*, i8 addrspace(3)*)* @__device_side_enqueue_block_invoke_3 to i8*) to i8 addrspace(4)*) }, align 4
-@__block_literal_global.1 = internal addrspace(1) constant { i32, i32, i8 addrspace(4)* } { i32 12, i32 4, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*, i8 addrspace(3)*, i8 addrspace(3)*, i8 addrspace(3)*)* @__device_side_enqueue_block_invoke_4 to i8*) to i8 addrspace(4)*) }, align 4
+@__block_literal_global = internal addrspace(1) constant { i32, i32, ptr addrspace(4) } { i32 12, i32 4, ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke_3 to ptr addrspace(4)) }, align 4
+@__block_literal_global.1 = internal addrspace(1) constant { i32, i32, ptr addrspace(4) } { i32 12, i32 4, ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke_4 to ptr addrspace(4)) }, align 4
 
-define dso_local spir_kernel void @device_side_enqueue(i32 addrspace(1)* noundef %a, i32 addrspace(1)* noundef %b, i32 noundef %i, i8 noundef signext %c0) {
+define dso_local spir_kernel void @device_side_enqueue(ptr addrspace(1) noundef %a, ptr addrspace(1) noundef %b, i32 noundef %i, i8 noundef signext %c0) {
 entry:
-  %a.addr = alloca i32 addrspace(1)*, align 4
-  %b.addr = alloca i32 addrspace(1)*, align 4
+  %a.addr = alloca ptr addrspace(1), align 4
+  %b.addr = alloca ptr addrspace(1), align 4
   %i.addr = alloca i32, align 4
   %c0.addr = alloca i8, align 1
-  %default_queue = alloca %opencl.queue_t*, align 4
+  %default_queue = alloca ptr, align 4
   %flags = alloca i32, align 4
   %ndrange = alloca %struct.ndrange_t, align 4
-  %clk_event = alloca %opencl.clk_event_t*, align 4
-  %event_wait_list = alloca %opencl.clk_event_t*, align 4
-  %event_wait_list2 = alloca [1 x %opencl.clk_event_t*], align 4
+  %clk_event = alloca ptr, align 4
+  %event_wait_list = alloca ptr, align 4
+  %event_wait_list2 = alloca [1 x ptr], align 4
   %tmp = alloca %struct.ndrange_t, align 4
-  %block = alloca <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>, align 4
+  %block = alloca <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, i8 }>, align 4
   %tmp3 = alloca %struct.ndrange_t, align 4
-  %block4 = alloca <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, align 4
+  %block4 = alloca <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, align 4
   %c = alloca i8, align 1
   %tmp11 = alloca %struct.ndrange_t, align 4
   %block_sizes = alloca [1 x i32], align 4
   %tmp12 = alloca %struct.ndrange_t, align 4
   %block_sizes13 = alloca [3 x i32], align 4
   %tmp14 = alloca %struct.ndrange_t, align 4
-  %block15 = alloca <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, align 4
-  store i32 addrspace(1)* %a, i32 addrspace(1)** %a.addr, align 4
-  store i32 addrspace(1)* %b, i32 addrspace(1)** %b.addr, align 4
-  store i32 %i, i32* %i.addr, align 4
-  store i8 %c0, i8* %c0.addr, align 1
-  store i32 0, i32* %flags, align 4
-  %arrayinit.begin = getelementptr inbounds [1 x %opencl.clk_event_t*], [1 x %opencl.clk_event_t*]* %event_wait_list2, i32 0, i32 0
-  %0 = load %opencl.clk_event_t*, %opencl.clk_event_t** %clk_event, align 4
-  store %opencl.clk_event_t* %0, %opencl.clk_event_t** %arrayinit.begin, align 4
-  %1 = load %opencl.queue_t*, %opencl.queue_t** %default_queue, align 4
-  %2 = load i32, i32* %flags, align 4
-  %3 = bitcast %struct.ndrange_t* %tmp to i8*
-  %4 = bitcast %struct.ndrange_t* %ndrange to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %3, i8* align 4 %4, i32 4, i1 false)
-  %block.size = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>* %block, i32 0, i32 0
-  store i32 21, i32* %block.size, align 4
-  %block.align = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>* %block, i32 0, i32 1
-  store i32 4, i32* %block.align, align 4
-  %block.invoke = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>* %block, i32 0, i32 2
-  store i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*)* @__device_side_enqueue_block_invoke to i8*) to i8 addrspace(4)*), i8 addrspace(4)** %block.invoke, align 4
-  %block.captured = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>* %block, i32 0, i32 3
-  %5 = load i32 addrspace(1)*, i32 addrspace(1)** %a.addr, align 4
-  store i32 addrspace(1)* %5, i32 addrspace(1)** %block.captured, align 4
-  %block.captured1 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>* %block, i32 0, i32 4
-  %6 = load i32, i32* %i.addr, align 4
-  store i32 %6, i32* %block.captured1, align 4
-  %block.captured2 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>* %block, i32 0, i32 5
-  %7 = load i8, i8* %c0.addr, align 1
-  store i8 %7, i8* %block.captured2, align 4
-  %8 = bitcast <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>* %block to %struct.__opencl_block_literal_generic*
-  %9 = addrspacecast %struct.__opencl_block_literal_generic* %8 to i8 addrspace(4)*
-  %10 = call spir_func i32 @__enqueue_kernel_basic(%opencl.queue_t* %1, i32 %2, %struct.ndrange_t* byval(%struct.ndrange_t) %tmp, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*)* @__device_side_enqueue_block_invoke_kernel to i8*) to i8 addrspace(4)*), i8 addrspace(4)* %9)
-  %11 = load %opencl.queue_t*, %opencl.queue_t** %default_queue, align 4
-  %12 = load i32, i32* %flags, align 4
-  %13 = bitcast %struct.ndrange_t* %tmp3 to i8*
-  %14 = bitcast %struct.ndrange_t* %ndrange to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %13, i8* align 4 %14, i32 4, i1 false)
-  %15 = addrspacecast %opencl.clk_event_t** %event_wait_list to %opencl.clk_event_t* addrspace(4)*
-  %16 = addrspacecast %opencl.clk_event_t** %clk_event to %opencl.clk_event_t* addrspace(4)*
-  %block.size5 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block4, i32 0, i32 0
-  store i32 24, i32* %block.size5, align 4
-  %block.align6 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block4, i32 0, i32 1
-  store i32 4, i32* %block.align6, align 4
-  %block.invoke7 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block4, i32 0, i32 2
-  store i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*)* @__device_side_enqueue_block_invoke_2 to i8*) to i8 addrspace(4)*), i8 addrspace(4)** %block.invoke7, align 4
-  %block.captured8 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block4, i32 0, i32 3
-  %17 = load i32 addrspace(1)*, i32 addrspace(1)** %a.addr, align 4
-  store i32 addrspace(1)* %17, i32 addrspace(1)** %block.captured8, align 4
-  %block.captured9 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block4, i32 0, i32 4
-  %18 = load i32, i32* %i.addr, align 4
-  store i32 %18, i32* %block.captured9, align 4
-  %block.captured10 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block4, i32 0, i32 5
-  %19 = load i32 addrspace(1)*, i32 addrspace(1)** %b.addr, align 4
-  store i32 addrspace(1)* %19, i32 addrspace(1)** %block.captured10, align 4
-  %20 = bitcast <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block4 to %struct.__opencl_block_literal_generic*
-  %21 = addrspacecast %struct.__opencl_block_literal_generic* %20 to i8 addrspace(4)*
-  %22 = call spir_func i32 @__enqueue_kernel_basic_events(%opencl.queue_t* %11, i32 %12, %struct.ndrange_t* %tmp3, i32 2, %opencl.clk_event_t* addrspace(4)* %15, %opencl.clk_event_t* addrspace(4)* %16, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*)* @__device_side_enqueue_block_invoke_2_kernel to i8*) to i8 addrspace(4)*), i8 addrspace(4)* %21)
-  %23 = load %opencl.queue_t*, %opencl.queue_t** %default_queue, align 4
-  %24 = load i32, i32* %flags, align 4
-  %25 = bitcast %struct.ndrange_t* %tmp11 to i8*
-  %26 = bitcast %struct.ndrange_t* %ndrange to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %25, i8* align 4 %26, i32 4, i1 false)
-  %arraydecay = getelementptr inbounds [1 x %opencl.clk_event_t*], [1 x %opencl.clk_event_t*]* %event_wait_list2, i32 0, i32 0
-  %27 = addrspacecast %opencl.clk_event_t** %arraydecay to %opencl.clk_event_t* addrspace(4)*
-  %28 = addrspacecast %opencl.clk_event_t** %clk_event to %opencl.clk_event_t* addrspace(4)*
-  %29 = getelementptr [1 x i32], [1 x i32]* %block_sizes, i32 0, i32 0
-  %30 = load i8, i8* %c, align 1
+  %block15 = alloca <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, align 4
+  store ptr addrspace(1) %a, ptr %a.addr, align 4
+  store ptr addrspace(1) %b, ptr %b.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  store i8 %c0, ptr %c0.addr, align 1
+  store i32 0, ptr %flags, align 4
+  %arrayinit.begin = getelementptr inbounds [1 x ptr], ptr %event_wait_list2, i32 0, i32 0
+  %0 = load ptr, ptr %clk_event, align 4
+  store ptr %0, ptr %arrayinit.begin, align 4
+  %1 = load ptr, ptr %default_queue, align 4
+  %2 = load i32, ptr %flags, align 4
+  %3 = bitcast ptr %tmp to ptr
+  %4 = bitcast ptr %ndrange to ptr
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %3, ptr align 4 %4, i32 4, i1 false)
+  %block.size = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, i8 }>, ptr %block, i32 0, i32 0
+  store i32 21, ptr %block.size, align 4
+  %block.align = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, i8 }>, ptr %block, i32 0, i32 1
+  store i32 4, ptr %block.align, align 4
+  %block.invoke = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, i8 }>, ptr %block, i32 0, i32 2
+  store ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke to ptr addrspace(4)), ptr %block.invoke, align 4
+  %block.captured = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, i8 }>, ptr %block, i32 0, i32 3
+  %5 = load ptr addrspace(1), ptr %a.addr, align 4
+  store ptr addrspace(1) %5, ptr %block.captured, align 4
+  %block.captured1 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, i8 }>, ptr %block, i32 0, i32 4
+  %6 = load i32, ptr %i.addr, align 4
+  store i32 %6, ptr %block.captured1, align 4
+  %block.captured2 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, i8 }>, ptr %block, i32 0, i32 5
+  %7 = load i8, ptr %c0.addr, align 1
+  store i8 %7, ptr %block.captured2, align 4
+  %8 = bitcast ptr %block to ptr
+  %9 = addrspacecast ptr %8 to ptr addrspace(4)
+  %10 = call spir_func i32 @__enqueue_kernel_basic(ptr %1, i32 %2, ptr byval(%struct.ndrange_t) %tmp, ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke_kernel to ptr addrspace(4)), ptr addrspace(4) %9)
+  %11 = load ptr, ptr %default_queue, align 4
+  %12 = load i32, ptr %flags, align 4
+  %13 = bitcast ptr %tmp3 to ptr
+  %14 = bitcast ptr %ndrange to ptr
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %13, ptr align 4 %14, i32 4, i1 false)
+  %15 = addrspacecast ptr %event_wait_list to ptr addrspace(4)
+  %16 = addrspacecast ptr %clk_event to ptr addrspace(4)
+  %block.size5 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block4, i32 0, i32 0
+  store i32 24, ptr %block.size5, align 4
+  %block.align6 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block4, i32 0, i32 1
+  store i32 4, ptr %block.align6, align 4
+  %block.invoke7 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block4, i32 0, i32 2
+  store ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke_2 to ptr addrspace(4)), ptr %block.invoke7, align 4
+  %block.captured8 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block4, i32 0, i32 3
+  %17 = load ptr addrspace(1), ptr %a.addr, align 4
+  store ptr addrspace(1) %17, ptr %block.captured8, align 4
+  %block.captured9 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block4, i32 0, i32 4
+  %18 = load i32, ptr %i.addr, align 4
+  store i32 %18, ptr %block.captured9, align 4
+  %block.captured10 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block4, i32 0, i32 5
+  %19 = load ptr addrspace(1), ptr %b.addr, align 4
+  store ptr addrspace(1) %19, ptr %block.captured10, align 4
+  %20 = bitcast ptr %block4 to ptr
+  %21 = addrspacecast ptr %20 to ptr addrspace(4)
+  %22 = call spir_func i32 @__enqueue_kernel_basic_events(ptr %11, i32 %12, ptr %tmp3, i32 2, ptr addrspace(4) %15, ptr addrspace(4) %16, ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke_2_kernel to ptr addrspace(4)), ptr addrspace(4) %21)
+  %23 = load ptr, ptr %default_queue, align 4
+  %24 = load i32, ptr %flags, align 4
+  %25 = bitcast ptr %tmp11 to ptr
+  %26 = bitcast ptr %ndrange to ptr
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %25, ptr align 4 %26, i32 4, i1 false)
+  %arraydecay = getelementptr inbounds [1 x ptr], ptr %event_wait_list2, i32 0, i32 0
+  %27 = addrspacecast ptr %arraydecay to ptr addrspace(4)
+  %28 = addrspacecast ptr %clk_event to ptr addrspace(4)
+  %29 = getelementptr [1 x i32], ptr %block_sizes, i32 0, i32 0
+  %30 = load i8, ptr %c, align 1
   %31 = zext i8 %30 to i32
-  store i32 %31, i32* %29, align 4
-  %32 = call spir_func i32 @__enqueue_kernel_events_varargs(%opencl.queue_t* %23, i32 %24, %struct.ndrange_t* %tmp11, i32 2, %opencl.clk_event_t* addrspace(4)* %27, %opencl.clk_event_t* addrspace(4)* %28, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*, i8 addrspace(3)*)* @__device_side_enqueue_block_invoke_3_kernel to i8*) to i8 addrspace(4)*), i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %29)
-  %33 = load %opencl.queue_t*, %opencl.queue_t** %default_queue, align 4
-  %34 = load i32, i32* %flags, align 4
-  %35 = bitcast %struct.ndrange_t* %tmp12 to i8*
-  %36 = bitcast %struct.ndrange_t* %ndrange to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %35, i8* align 4 %36, i32 4, i1 false)
-  %37 = getelementptr [3 x i32], [3 x i32]* %block_sizes13, i32 0, i32 0
-  store i32 1, i32* %37, align 4
-  %38 = getelementptr [3 x i32], [3 x i32]* %block_sizes13, i32 0, i32 1
-  store i32 2, i32* %38, align 4
-  %39 = getelementptr [3 x i32], [3 x i32]* %block_sizes13, i32 0, i32 2
-  store i32 4, i32* %39, align 4
-  %40 = call spir_func i32 @__enqueue_kernel_varargs(%opencl.queue_t* %33, i32 %34, %struct.ndrange_t* %tmp12, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*, i8 addrspace(3)*, i8 addrspace(3)*, i8 addrspace(3)*)* @__device_side_enqueue_block_invoke_4_kernel to i8*) to i8 addrspace(4)*), i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global.1 to i8 addrspace(1)*) to i8 addrspace(4)*), i32 3, i32* %37)
-  %41 = load %opencl.queue_t*, %opencl.queue_t** %default_queue, align 4
-  %42 = load i32, i32* %flags, align 4
-  %43 = bitcast %struct.ndrange_t* %tmp14 to i8*
-  %44 = bitcast %struct.ndrange_t* %ndrange to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %43, i8* align 4 %44, i32 4, i1 false)
-  %45 = addrspacecast %opencl.clk_event_t** %clk_event to %opencl.clk_event_t* addrspace(4)*
-  %block.size16 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block15, i32 0, i32 0
-  store i32 24, i32* %block.size16, align 4
-  %block.align17 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block15, i32 0, i32 1
-  store i32 4, i32* %block.align17, align 4
-  %block.invoke18 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block15, i32 0, i32 2
-  store i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*)* @__device_side_enqueue_block_invoke_5 to i8*) to i8 addrspace(4)*), i8 addrspace(4)** %block.invoke18, align 4
-  %block.captured19 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block15, i32 0, i32 3
-  %46 = load i32 addrspace(1)*, i32 addrspace(1)** %a.addr, align 4
-  store i32 addrspace(1)* %46, i32 addrspace(1)** %block.captured19, align 4
-  %block.captured20 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block15, i32 0, i32 4
-  %47 = load i32, i32* %i.addr, align 4
-  store i32 %47, i32* %block.captured20, align 4
-  %block.captured21 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block15, i32 0, i32 5
-  %48 = load i32 addrspace(1)*, i32 addrspace(1)** %b.addr, align 4
-  store i32 addrspace(1)* %48, i32 addrspace(1)** %block.captured21, align 4
-  %49 = bitcast <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block15 to %struct.__opencl_block_literal_generic*
-  %50 = addrspacecast %struct.__opencl_block_literal_generic* %49 to i8 addrspace(4)*
-  %51 = call spir_func i32 @__enqueue_kernel_basic_events(%opencl.queue_t* %41, i32 %42, %struct.ndrange_t* %tmp14, i32 0, %opencl.clk_event_t* addrspace(4)* null, %opencl.clk_event_t* addrspace(4)* %45, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*)* @__device_side_enqueue_block_invoke_5_kernel to i8*) to i8 addrspace(4)*), i8 addrspace(4)* %50)
+  store i32 %31, ptr %29, align 4
+  %32 = call spir_func i32 @__enqueue_kernel_events_varargs(ptr %23, i32 %24, ptr %tmp11, i32 2, ptr addrspace(4) %27, ptr addrspace(4) %28, ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke_3_kernel to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr addrspace(1) @__block_literal_global to ptr addrspace(4)), i32 1, ptr %29)
+  %33 = load ptr, ptr %default_queue, align 4
+  %34 = load i32, ptr %flags, align 4
+  %35 = bitcast ptr %tmp12 to ptr
+  %36 = bitcast ptr %ndrange to ptr
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %35, ptr align 4 %36, i32 4, i1 false)
+  %37 = getelementptr [3 x i32], ptr %block_sizes13, i32 0, i32 0
+  store i32 1, ptr %37, align 4
+  %38 = getelementptr [3 x i32], ptr %block_sizes13, i32 0, i32 1
+  store i32 2, ptr %38, align 4
+  %39 = getelementptr [3 x i32], ptr %block_sizes13, i32 0, i32 2
+  store i32 4, ptr %39, align 4
+  %40 = call spir_func i32 @__enqueue_kernel_varargs(ptr %33, i32 %34, ptr %tmp12, ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke_4_kernel to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr addrspace(1) @__block_literal_global.1 to ptr addrspace(4)), i32 3, ptr %37)
+  %41 = load ptr, ptr %default_queue, align 4
+  %42 = load i32, ptr %flags, align 4
+  %43 = bitcast ptr %tmp14 to ptr
+  %44 = bitcast ptr %ndrange to ptr
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %43, ptr align 4 %44, i32 4, i1 false)
+  %45 = addrspacecast ptr %clk_event to ptr addrspace(4)
+  %block.size16 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block15, i32 0, i32 0
+  store i32 24, ptr %block.size16, align 4
+  %block.align17 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block15, i32 0, i32 1
+  store i32 4, ptr %block.align17, align 4
+  %block.invoke18 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block15, i32 0, i32 2
+  store ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke_5 to ptr addrspace(4)), ptr %block.invoke18, align 4
+  %block.captured19 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block15, i32 0, i32 3
+  %46 = load ptr addrspace(1), ptr %a.addr, align 4
+  store ptr addrspace(1) %46, ptr %block.captured19, align 4
+  %block.captured20 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block15, i32 0, i32 4
+  %47 = load i32, ptr %i.addr, align 4
+  store i32 %47, ptr %block.captured20, align 4
+  %block.captured21 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr %block15, i32 0, i32 5
+  %48 = load ptr addrspace(1), ptr %b.addr, align 4
+  store ptr addrspace(1) %48, ptr %block.captured21, align 4
+  %49 = bitcast ptr %block15 to ptr
+  %50 = addrspacecast ptr %49 to ptr addrspace(4)
+  %51 = call spir_func i32 @__enqueue_kernel_basic_events(ptr %41, i32 %42, ptr %tmp14, i32 0, ptr addrspace(4) null, ptr addrspace(4) %45, ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke_5_kernel to ptr addrspace(4)), ptr addrspace(4) %50)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i32, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)
 
-define internal spir_func void @__device_side_enqueue_block_invoke(i8 addrspace(4)* noundef %.block_descriptor) {
+define internal spir_func void @__device_side_enqueue_block_invoke(ptr addrspace(4) noundef %.block_descriptor) {
 entry:
-  %.block_descriptor.addr = alloca i8 addrspace(4)*, align 4
-  %block.addr = alloca <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }> addrspace(4)*, align 4
-  store i8 addrspace(4)* %.block_descriptor, i8 addrspace(4)** %.block_descriptor.addr, align 4
-  %block = bitcast i8 addrspace(4)* %.block_descriptor to <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }> addrspace(4)*
-  store <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }> addrspace(4)* %block, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }> addrspace(4)** %block.addr, align 4
-  %block.capture.addr = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }> addrspace(4)* %block, i32 0, i32 5
-  %0 = load i8, i8 addrspace(4)* %block.capture.addr, align 4
+  %.block_descriptor.addr = alloca ptr addrspace(4), align 4
+  %block.addr = alloca ptr addrspace(4), align 4
+  store ptr addrspace(4) %.block_descriptor, ptr %.block_descriptor.addr, align 4
+  %block = bitcast ptr addrspace(4) %.block_descriptor to ptr addrspace(4)
+  store ptr addrspace(4) %block, ptr %block.addr, align 4
+  %block.capture.addr = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, i8 }>, ptr addrspace(4) %block, i32 0, i32 5
+  %0 = load i8, ptr addrspace(4) %block.capture.addr, align 4
   %conv = sext i8 %0 to i32
-  %block.capture.addr1 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }> addrspace(4)* %block, i32 0, i32 3
-  %1 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(4)* %block.capture.addr1, align 4
-  %block.capture.addr2 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i8 }> addrspace(4)* %block, i32 0, i32 4
-  %2 = load i32, i32 addrspace(4)* %block.capture.addr2, align 4
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %1, i32 %2
-  store i32 %conv, i32 addrspace(1)* %arrayidx, align 4
+  %block.capture.addr1 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, i8 }>, ptr addrspace(4) %block, i32 0, i32 3
+  %1 = load ptr addrspace(1), ptr addrspace(4) %block.capture.addr1, align 4
+  %block.capture.addr2 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, i8 }>, ptr addrspace(4) %block, i32 0, i32 4
+  %2 = load i32, ptr addrspace(4) %block.capture.addr2, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %1, i32 %2
+  store i32 %conv, ptr addrspace(1) %arrayidx, align 4
   ret void
 }
 
-define spir_kernel void @__device_side_enqueue_block_invoke_kernel(i8 addrspace(4)* %0) {
+define spir_kernel void @__device_side_enqueue_block_invoke_kernel(ptr addrspace(4) %0) {
 entry:
-  call spir_func void @__device_side_enqueue_block_invoke(i8 addrspace(4)* %0)
+  call spir_func void @__device_side_enqueue_block_invoke(ptr addrspace(4) %0)
   ret void
 }
 
-declare spir_func i32 @__enqueue_kernel_basic(%opencl.queue_t*, i32, %struct.ndrange_t*, i8 addrspace(4)*, i8 addrspace(4)*)
+declare spir_func i32 @__enqueue_kernel_basic(ptr, i32, ptr, ptr addrspace(4), ptr addrspace(4))
 
-define internal spir_func void @__device_side_enqueue_block_invoke_2(i8 addrspace(4)* noundef %.block_descriptor) {
+define internal spir_func void @__device_side_enqueue_block_invoke_2(ptr addrspace(4) noundef %.block_descriptor) {
 entry:
-  %.block_descriptor.addr = alloca i8 addrspace(4)*, align 4
-  %block.addr = alloca <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)*, align 4
-  store i8 addrspace(4)* %.block_descriptor, i8 addrspace(4)** %.block_descriptor.addr, align 4
-  %block = bitcast i8 addrspace(4)* %.block_descriptor to <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)*
-  store <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)* %block, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)** %block.addr, align 4
-  %block.capture.addr = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)* %block, i32 0, i32 5
-  %0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(4)* %block.capture.addr, align 4
-  %block.capture.addr1 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)* %block, i32 0, i32 4
-  %1 = load i32, i32 addrspace(4)* %block.capture.addr1, align 4
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %0, i32 %1
-  %2 = load i32, i32 addrspace(1)* %arrayidx, align 4
-  %block.capture.addr2 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)* %block, i32 0, i32 3
-  %3 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(4)* %block.capture.addr2, align 4
-  %block.capture.addr3 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)* %block, i32 0, i32 4
-  %4 = load i32, i32 addrspace(4)* %block.capture.addr3, align 4
-  %arrayidx4 = getelementptr inbounds i32, i32 addrspace(1)* %3, i32 %4
-  store i32 %2, i32 addrspace(1)* %arrayidx4, align 4
+  %.block_descriptor.addr = alloca ptr addrspace(4), align 4
+  %block.addr = alloca ptr addrspace(4), align 4
+  store ptr addrspace(4) %.block_descriptor, ptr %.block_descriptor.addr, align 4
+  %block = bitcast ptr addrspace(4) %.block_descriptor to ptr addrspace(4)
+  store ptr addrspace(4) %block, ptr %block.addr, align 4
+  %block.capture.addr = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) %block, i32 0, i32 5
+  %0 = load ptr addrspace(1), ptr addrspace(4) %block.capture.addr, align 4
+  %block.capture.addr1 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) %block, i32 0, i32 4
+  %1 = load i32, ptr addrspace(4) %block.capture.addr1, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %0, i32 %1
+  %2 = load i32, ptr addrspace(1) %arrayidx, align 4
+  %block.capture.addr2 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) %block, i32 0, i32 3
+  %3 = load ptr addrspace(1), ptr addrspace(4) %block.capture.addr2, align 4
+  %block.capture.addr3 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) %block, i32 0, i32 4
+  %4 = load i32, ptr addrspace(4) %block.capture.addr3, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr addrspace(1) %3, i32 %4
+  store i32 %2, ptr addrspace(1) %arrayidx4, align 4
   ret void
 }
 
-define spir_kernel void @__device_side_enqueue_block_invoke_2_kernel(i8 addrspace(4)* %0) {
+define spir_kernel void @__device_side_enqueue_block_invoke_2_kernel(ptr addrspace(4) %0) {
 entry:
-  call spir_func void @__device_side_enqueue_block_invoke_2(i8 addrspace(4)* %0)
+  call spir_func void @__device_side_enqueue_block_invoke_2(ptr addrspace(4) %0)
   ret void
 }
 
-declare spir_func i32 @__enqueue_kernel_basic_events(%opencl.queue_t*, i32, %struct.ndrange_t*, i32, %opencl.clk_event_t* addrspace(4)*, %opencl.clk_event_t* addrspace(4)*, i8 addrspace(4)*, i8 addrspace(4)*)
+declare spir_func i32 @__enqueue_kernel_basic_events(ptr, i32, ptr, i32, ptr addrspace(4), ptr addrspace(4), ptr addrspace(4), ptr addrspace(4))
 
-define internal spir_func void @__device_side_enqueue_block_invoke_3(i8 addrspace(4)* noundef %.block_descriptor, i8 addrspace(3)* noundef %p) {
+define internal spir_func void @__device_side_enqueue_block_invoke_3(ptr addrspace(4) noundef %.block_descriptor, ptr addrspace(3) noundef %p) {
 entry:
-  %.block_descriptor.addr = alloca i8 addrspace(4)*, align 4
-  %p.addr = alloca i8 addrspace(3)*, align 4
-  %block.addr = alloca <{ i32, i32, i8 addrspace(4)* }> addrspace(4)*, align 4
-  store i8 addrspace(4)* %.block_descriptor, i8 addrspace(4)** %.block_descriptor.addr, align 4
-  %block = bitcast i8 addrspace(4)* %.block_descriptor to <{ i32, i32, i8 addrspace(4)* }> addrspace(4)*
-  store i8 addrspace(3)* %p, i8 addrspace(3)** %p.addr, align 4
-  store <{ i32, i32, i8 addrspace(4)* }> addrspace(4)* %block, <{ i32, i32, i8 addrspace(4)* }> addrspace(4)** %block.addr, align 4
+  %.block_descriptor.addr = alloca ptr addrspace(4), align 4
+  %p.addr = alloca ptr addrspace(3), align 4
+  %block.addr = alloca ptr addrspace(4), align 4
+  store ptr addrspace(4) %.block_descriptor, ptr %.block_descriptor.addr, align 4
+  %block = bitcast ptr addrspace(4) %.block_descriptor to ptr addrspace(4)
+  store ptr addrspace(3) %p, ptr %p.addr, align 4
+  store ptr addrspace(4) %block, ptr %block.addr, align 4
   ret void
 }
 
-define spir_kernel void @__device_side_enqueue_block_invoke_3_kernel(i8 addrspace(4)* %0, i8 addrspace(3)* %1) {
+define spir_kernel void @__device_side_enqueue_block_invoke_3_kernel(ptr addrspace(4) %0, ptr addrspace(3) %1) {
 entry:
-  call spir_func void @__device_side_enqueue_block_invoke_3(i8 addrspace(4)* %0, i8 addrspace(3)* %1)
+  call spir_func void @__device_side_enqueue_block_invoke_3(ptr addrspace(4) %0, ptr addrspace(3) %1)
   ret void
 }
 
-declare spir_func i32 @__enqueue_kernel_events_varargs(%opencl.queue_t*, i32, %struct.ndrange_t*, i32, %opencl.clk_event_t* addrspace(4)*, %opencl.clk_event_t* addrspace(4)*, i8 addrspace(4)*, i8 addrspace(4)*, i32, i32*)
+declare spir_func i32 @__enqueue_kernel_events_varargs(ptr, i32, ptr, i32, ptr addrspace(4), ptr addrspace(4), ptr addrspace(4), ptr addrspace(4), i32, ptr)
 
-define internal spir_func void @__device_side_enqueue_block_invoke_4(i8 addrspace(4)* noundef %.block_descriptor, i8 addrspace(3)* noundef %p1, i8 addrspace(3)* noundef %p2, i8 addrspace(3)* noundef %p3) {
+define internal spir_func void @__device_side_enqueue_block_invoke_4(ptr addrspace(4) noundef %.block_descriptor, ptr addrspace(3) noundef %p1, ptr addrspace(3) noundef %p2, ptr addrspace(3) noundef %p3) {
 entry:
-  %.block_descriptor.addr = alloca i8 addrspace(4)*, align 4
-  %p1.addr = alloca i8 addrspace(3)*, align 4
-  %p2.addr = alloca i8 addrspace(3)*, align 4
-  %p3.addr = alloca i8 addrspace(3)*, align 4
-  %block.addr = alloca <{ i32, i32, i8 addrspace(4)* }> addrspace(4)*, align 4
-  store i8 addrspace(4)* %.block_descriptor, i8 addrspace(4)** %.block_descriptor.addr, align 4
-  %block = bitcast i8 addrspace(4)* %.block_descriptor to <{ i32, i32, i8 addrspace(4)* }> addrspace(4)*
-  store i8 addrspace(3)* %p1, i8 addrspace(3)** %p1.addr, align 4
-  store i8 addrspace(3)* %p2, i8 addrspace(3)** %p2.addr, align 4
-  store i8 addrspace(3)* %p3, i8 addrspace(3)** %p3.addr, align 4
-  store <{ i32, i32, i8 addrspace(4)* }> addrspace(4)* %block, <{ i32, i32, i8 addrspace(4)* }> addrspace(4)** %block.addr, align 4
+  %.block_descriptor.addr = alloca ptr addrspace(4), align 4
+  %p1.addr = alloca ptr addrspace(3), align 4
+  %p2.addr = alloca ptr addrspace(3), align 4
+  %p3.addr = alloca ptr addrspace(3), align 4
+  %block.addr = alloca ptr addrspace(4), align 4
+  store ptr addrspace(4) %.block_descriptor, ptr %.block_descriptor.addr, align 4
+  %block = bitcast ptr addrspace(4) %.block_descriptor to ptr addrspace(4)
+  store ptr addrspace(3) %p1, ptr %p1.addr, align 4
+  store ptr addrspace(3) %p2, ptr %p2.addr, align 4
+  store ptr addrspace(3) %p3, ptr %p3.addr, align 4
+  store ptr addrspace(4) %block, ptr %block.addr, align 4
   ret void
 }
 
-define spir_kernel void @__device_side_enqueue_block_invoke_4_kernel(i8 addrspace(4)* %0, i8 addrspace(3)* %1, i8 addrspace(3)* %2, i8 addrspace(3)* %3) {
+define spir_kernel void @__device_side_enqueue_block_invoke_4_kernel(ptr addrspace(4) %0, ptr addrspace(3) %1, ptr addrspace(3) %2, ptr addrspace(3) %3) {
 entry:
-  call spir_func void @__device_side_enqueue_block_invoke_4(i8 addrspace(4)* %0, i8 addrspace(3)* %1, i8 addrspace(3)* %2, i8 addrspace(3)* %3)
+  call spir_func void @__device_side_enqueue_block_invoke_4(ptr addrspace(4) %0, ptr addrspace(3) %1, ptr addrspace(3) %2, ptr addrspace(3) %3)
   ret void
 }
 
-declare spir_func i32 @__enqueue_kernel_varargs(%opencl.queue_t*, i32, %struct.ndrange_t*, i8 addrspace(4)*, i8 addrspace(4)*, i32, i32*)
+declare spir_func i32 @__enqueue_kernel_varargs(ptr, i32, ptr, ptr addrspace(4), ptr addrspace(4), i32, ptr)
 
-define internal spir_func void @__device_side_enqueue_block_invoke_5(i8 addrspace(4)* noundef %.block_descriptor) {
+define internal spir_func void @__device_side_enqueue_block_invoke_5(ptr addrspace(4) noundef %.block_descriptor) {
 entry:
-  %.block_descriptor.addr = alloca i8 addrspace(4)*, align 4
-  %block.addr = alloca <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)*, align 4
-  store i8 addrspace(4)* %.block_descriptor, i8 addrspace(4)** %.block_descriptor.addr, align 4
-  %block = bitcast i8 addrspace(4)* %.block_descriptor to <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)*
-  store <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)* %block, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)** %block.addr, align 4
-  %block.capture.addr = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)* %block, i32 0, i32 5
-  %0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(4)* %block.capture.addr, align 4
-  %block.capture.addr1 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)* %block, i32 0, i32 4
-  %1 = load i32, i32 addrspace(4)* %block.capture.addr1, align 4
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %0, i32 %1
-  %2 = load i32, i32 addrspace(1)* %arrayidx, align 4
-  %block.capture.addr2 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)* %block, i32 0, i32 3
-  %3 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(4)* %block.capture.addr2, align 4
-  %block.capture.addr3 = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>, <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }> addrspace(4)* %block, i32 0, i32 4
-  %4 = load i32, i32 addrspace(4)* %block.capture.addr3, align 4
-  %arrayidx4 = getelementptr inbounds i32, i32 addrspace(1)* %3, i32 %4
-  store i32 %2, i32 addrspace(1)* %arrayidx4, align 4
+  %.block_descriptor.addr = alloca ptr addrspace(4), align 4
+  %block.addr = alloca ptr addrspace(4), align 4
+  store ptr addrspace(4) %.block_descriptor, ptr %.block_descriptor.addr, align 4
+  %block = bitcast ptr addrspace(4) %.block_descriptor to ptr addrspace(4)
+  store ptr addrspace(4) %block, ptr %block.addr, align 4
+  %block.capture.addr = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) %block, i32 0, i32 5
+  %0 = load ptr addrspace(1), ptr addrspace(4) %block.capture.addr, align 4
+  %block.capture.addr1 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) %block, i32 0, i32 4
+  %1 = load i32, ptr addrspace(4) %block.capture.addr1, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %0, i32 %1
+  %2 = load i32, ptr addrspace(1) %arrayidx, align 4
+  %block.capture.addr2 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) %block, i32 0, i32 3
+  %3 = load ptr addrspace(1), ptr addrspace(4) %block.capture.addr2, align 4
+  %block.capture.addr3 = getelementptr inbounds <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) %block, i32 0, i32 4
+  %4 = load i32, ptr addrspace(4) %block.capture.addr3, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr addrspace(1) %3, i32 %4
+  store i32 %2, ptr addrspace(1) %arrayidx4, align 4
   ret void
 }
 
-define spir_kernel void @__device_side_enqueue_block_invoke_5_kernel(i8 addrspace(4)* %0) {
+define spir_kernel void @__device_side_enqueue_block_invoke_5_kernel(ptr addrspace(4) %0) {
 entry:
-  call spir_func void @__device_side_enqueue_block_invoke_5(i8 addrspace(4)* %0)
+  call spir_func void @__device_side_enqueue_block_invoke_5(ptr addrspace(4) %0)
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll b/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll
index 49b84c1e9530a..cf64cf11e8a82 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/explicit-conversions.ll
@@ -7,11 +7,11 @@
 ;;   res[0] = convert_uchar2_sat(*a);
 ;; }
 
-define dso_local spir_kernel void @testSToU(<2 x i32> addrspace(1)* nocapture noundef readonly %a, <2 x i8> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testSToU(ptr addrspace(1) nocapture noundef readonly %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
-  %0 = load <2 x i32>, <2 x i32> addrspace(1)* %a, align 8
+  %0 = load <2 x i32>, ptr addrspace(1) %a, align 8
   %call = call spir_func <2 x i8> @_Z18convert_uchar2_satDv2_i(<2 x i32> noundef %0)
-  store <2 x i8> %call, <2 x i8> addrspace(1)* %res, align 2
+  store <2 x i8> %call, ptr addrspace(1) %res, align 2
   ret void
 }
 
@@ -23,11 +23,11 @@ declare spir_func <2 x i8> @_Z18convert_uchar2_satDv2_i(<2 x i32> noundef) local
 ;;   res[0] = convert_char2_sat(*a);
 ;; }
 
-define dso_local spir_kernel void @testUToS(<2 x i32> addrspace(1)* nocapture noundef readonly %a, <2 x i8> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testUToS(ptr addrspace(1) nocapture noundef readonly %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
-  %0 = load <2 x i32>, <2 x i32> addrspace(1)* %a, align 8
+  %0 = load <2 x i32>, ptr addrspace(1) %a, align 8
   %call = call spir_func <2 x i8> @_Z17convert_char2_satDv2_j(<2 x i32> noundef %0)
-  store <2 x i8> %call, <2 x i8> addrspace(1)* %res, align 2
+  store <2 x i8> %call, ptr addrspace(1) %res, align 2
   ret void
 }
 
@@ -39,11 +39,11 @@ declare spir_func <2 x i8> @_Z17convert_char2_satDv2_j(<2 x i32> noundef) local_
 ;;   res[0] = convert_float2_rtz(*a);
 ;; }
 
-define dso_local spir_kernel void @testUToF(<2 x i32> addrspace(1)* nocapture noundef readonly %a, <2 x float> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testUToF(ptr addrspace(1) nocapture noundef readonly %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
-  %0 = load <2 x i32>, <2 x i32> addrspace(1)* %a, align 8
+  %0 = load <2 x i32>, ptr addrspace(1) %a, align 8
   %call = call spir_func <2 x float> @_Z18convert_float2_rtzDv2_j(<2 x i32> noundef %0)
-  store <2 x float> %call, <2 x float> addrspace(1)* %res, align 8
+  store <2 x float> %call, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -55,11 +55,11 @@ declare spir_func <2 x float> @_Z18convert_float2_rtzDv2_j(<2 x i32> noundef) lo
 ;;   res[0] = convert_uint2_sat_rtn(*a);
 ;; }
 
-define dso_local spir_kernel void @testFToUSat(<2 x float> addrspace(1)* nocapture noundef readonly %a, <2 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testFToUSat(ptr addrspace(1) nocapture noundef readonly %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
-  %0 = load <2 x float>, <2 x float> addrspace(1)* %a, align 8
+  %0 = load <2 x float>, ptr addrspace(1) %a, align 8
   %call = call spir_func <2 x i32> @_Z21convert_uint2_sat_rtnDv2_f(<2 x float> noundef %0)
-  store <2 x i32> %call, <2 x i32> addrspace(1)* %res, align 8
+  store <2 x i32> %call, ptr addrspace(1) %res, align 8
   ret void
 }
 
@@ -71,11 +71,11 @@ declare spir_func <2 x i32> @_Z21convert_uint2_sat_rtnDv2_f(<2 x float> noundef)
 ;;   res[0] = convert_uint_sat(*a);
 ;; }
 
-define dso_local spir_kernel void @testUToUSat(i8 addrspace(1)* nocapture noundef readonly %a, i32 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testUToUSat(ptr addrspace(1) nocapture noundef readonly %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
-  %0 = load i8, i8 addrspace(1)* %a, align 1
+  %0 = load i8, ptr addrspace(1) %a, align 1
   %call = call spir_func i32 @_Z16convert_uint_sath(i8 noundef zeroext %0)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -87,11 +87,11 @@ declare spir_func i32 @_Z16convert_uint_sath(i8 noundef zeroext) local_unnamed_a
 ;;   res[0] = convert_uchar_sat(*a);
 ;; }
 
-define dso_local spir_kernel void @testUToUSat1(i32 addrspace(1)* nocapture noundef readonly %a, i8 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testUToUSat1(ptr addrspace(1) nocapture noundef readonly %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
-  %0 = load i32, i32 addrspace(1)* %a, align 4
+  %0 = load i32, ptr addrspace(1) %a, align 4
   %call = call spir_func zeroext i8 @_Z17convert_uchar_satj(i32 noundef %0)
-  store i8 %call, i8 addrspace(1)* %res, align 1
+  store i8 %call, ptr addrspace(1) %res, align 1
   ret void
 }
 
@@ -103,15 +103,15 @@ declare spir_func zeroext i8 @_Z17convert_uchar_satj(i32 noundef) local_unnamed_
 ;;   res[0] = convert_uint3_rtp(*a);
 ;; }
 
-define dso_local spir_kernel void @testFToU(<3 x float> addrspace(1)* nocapture noundef readonly %a, <3 x i32> addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testFToU(ptr addrspace(1) nocapture noundef readonly %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
-  %castToVec4 = bitcast <3 x float> addrspace(1)* %a to <4 x float> addrspace(1)*
-  %loadVec4 = load <4 x float>, <4 x float> addrspace(1)* %castToVec4, align 16
+  %castToVec4 = bitcast ptr addrspace(1) %a to ptr addrspace(1)
+  %loadVec4 = load <4 x float>, ptr addrspace(1) %castToVec4, align 16
   %extractVec = shufflevector <4 x float> %loadVec4, <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
   %call = call spir_func <3 x i32> @_Z17convert_uint3_rtpDv3_f(<3 x float> noundef %extractVec)
   %extractVec1 = shufflevector <3 x i32> %call, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
-  %storetmp = bitcast <3 x i32> addrspace(1)* %res to <4 x i32> addrspace(1)*
-  store <4 x i32> %extractVec1, <4 x i32> addrspace(1)* %storetmp, align 16
+  %storetmp = bitcast ptr addrspace(1) %res to ptr addrspace(1)
+  store <4 x i32> %extractVec1, ptr addrspace(1) %storetmp, align 16
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll b/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll
index 0ed1dc76628ca..1b305b93e8a9d 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/extract_insert_value.ll
@@ -36,15 +36,15 @@
 ; CHECK-SPIRV:        OpStore %[[#store_ptr]] %[[#inserted_array]]
 ; CHECK-SPIRV-LABEL:  OpFunctionEnd
 
-define spir_func void @array_test(%struct.arr addrspace(1)* %object) {
+define spir_func void @array_test(ptr addrspace(1) %object) {
 entry:
-  %0 = getelementptr inbounds %struct.arr, %struct.arr addrspace(1)* %object, i32 0, i32 0
-  %1 = load [7 x float], [7 x float] addrspace(1)* %0, align 4
+  %0 = getelementptr inbounds %struct.arr, ptr addrspace(1) %object, i32 0, i32 0
+  %1 = load [7 x float], ptr addrspace(1) %0, align 4
   %2 = extractvalue [7 x float] %1, 4
   %3 = extractvalue [7 x float] %1, 2
   %4 = fadd float %2, %3
   %5 = insertvalue [7 x float] %1, float %4, 5
-  store [7 x float] %5, [7 x float] addrspace(1)* %0
+  store [7 x float] %5, ptr addrspace(1) %0
   ret void
 }
 
@@ -58,13 +58,13 @@ entry:
 ; CHECK-SPIRV:        OpStore %[[#store1_ptr]] %[[#inserted_struct]]
 ; CHECK-SPIRV-LABEL:  OpFunctionEnd
 
-define spir_func void @struct_test(%struct.st addrspace(1)* %object) {
+define spir_func void @struct_test(ptr addrspace(1) %object) {
 entry:
-  %0 = getelementptr inbounds %struct.st, %struct.st addrspace(1)* %object, i32 0, i32 0
-  %1 = load %struct.inner, %struct.inner addrspace(1)* %0, align 4
+  %0 = getelementptr inbounds %struct.st, ptr addrspace(1) %object, i32 0, i32 0
+  %1 = load %struct.inner, ptr addrspace(1) %0, align 4
   %2 = extractvalue %struct.inner %1, 0
   %3 = fadd float %2, 1.000000e+00
   %4 = insertvalue %struct.inner %1, float %3, 0
-  store %struct.inner %4, %struct.inner addrspace(1)* %0
+  store %struct.inner %4, ptr addrspace(1) %0
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll b/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll
index ff73eb4c2eb5d..34e64e6f47438 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fadd.ll
@@ -21,19 +21,19 @@
 define spir_kernel void @testFAdd_float(float %a, float %b, ptr addrspace(1) %out) {
 entry:
   %r1 = fadd float %a, %b
-  store volatile float %r1, float addrspace(1)* %out
+  store volatile float %r1, ptr addrspace(1) %out
   %r2 = fadd nnan float %a, %b
-  store volatile float %r2, float addrspace(1)* %out
+  store volatile float %r2, ptr addrspace(1) %out
   %r3 = fadd ninf float %a, %b
-  store volatile float %r3, float addrspace(1)* %out
+  store volatile float %r3, ptr addrspace(1) %out
   %r4 = fadd nsz float %a, %b
-  store volatile float %r4, float addrspace(1)* %out
+  store volatile float %r4, ptr addrspace(1) %out
   %r5 = fadd arcp float %a, %b
-  store volatile float %r5, float addrspace(1)* %out
+  store volatile float %r5, ptr addrspace(1) %out
   %r6 = fadd fast float %a, %b
-  store volatile float %r6, float addrspace(1)* %out
+  store volatile float %r6, ptr addrspace(1) %out
   %r7 = fadd nnan ninf float %a, %b
-  store volatile float %r7, float addrspace(1)* %out
+  store volatile float %r7, ptr addrspace(1) %out
   ret void
 }
 
@@ -45,21 +45,21 @@ entry:
 ; CHECK-SPIRV:     %[[#]] = OpFAdd %[[#double]]
 ; CHECK-SPIRV:     %[[#]] = OpFAdd %[[#double]]
 
-define spir_kernel void @testFAdd_double(double %a, double %b, double addrspace(1)* %out) local_unnamed_addr {
+define spir_kernel void @testFAdd_double(double %a, double %b, ptr addrspace(1) %out) local_unnamed_addr {
 entry:
   %r11 = fadd double %a, %b
-  store volatile double %r11, double addrspace(1)* %out
+  store volatile double %r11, ptr addrspace(1) %out
   %r12 = fadd nnan double %a, %b
-  store volatile double %r12, double addrspace(1)* %out
+  store volatile double %r12, ptr addrspace(1) %out
   %r13 = fadd ninf double %a, %b
-  store volatile double %r13, double addrspace(1)* %out
+  store volatile double %r13, ptr addrspace(1) %out
   %r14 = fadd nsz double %a, %b
-  store volatile double %r14, double addrspace(1)* %out
+  store volatile double %r14, ptr addrspace(1) %out
   %r15 = fadd arcp double %a, %b
-  store volatile double %r15, double addrspace(1)* %out
+  store volatile double %r15, ptr addrspace(1) %out
   %r16 = fadd fast double %a, %b
-  store volatile double %r16, double addrspace(1)* %out
+  store volatile double %r16, ptr addrspace(1) %out
   %r17 = fadd nnan ninf double %a, %b
-  store volatile double %r17, double addrspace(1)* %out
+  store volatile double %r17, ptr addrspace(1) %out
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll b/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll
index 550ec1a6f2550..b8ffeb2b3ea0f 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fclamp.ll
@@ -4,11 +4,11 @@
 ; CHECK-SPIRV:     %[[#]] = OpExtInst %[[#]] %[[#]] fclamp
 ; CHECK-SPIRV-NOT: %[[#]] = OpExtInst %[[#]] %[[#]] clamp
 
-define spir_kernel void @test_scalar(float addrspace(1)* nocapture readonly %f) {
+define spir_kernel void @test_scalar(ptr addrspace(1) nocapture readonly %f) {
 entry:
-  %0 = load float, float addrspace(1)* %f, align 4
+  %0 = load float, ptr addrspace(1) %f, align 4
   %call = tail call spir_func float @_Z5clampfff(float %0, float 0.000000e+00, float 1.000000e+00)
-  %1 = load float, float addrspace(1)* %f, align 4
+  %1 = load float, ptr addrspace(1) %f, align 4
   %conv = fptrunc float %1 to half
   %call1 = tail call spir_func half @_Z5clampDhDhDh(half %conv, half %conv, half %conv)
   ret void
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll b/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll
index 1d04abd4d6508..5845bc84a925d 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fdiv.ll
@@ -17,21 +17,21 @@
 ; CHECK-SPIRV:     %[[#r6]] = OpFDiv %[[#float]]
 ; CHECK-SPIRV:     %[[#r7]] = OpFDiv %[[#float]]
 
-define spir_kernel void @testFDiv(float %a, float %b, float addrspace(1)* %out) local_unnamed_addr {
+define spir_kernel void @testFDiv(float %a, float %b, ptr addrspace(1) %out) local_unnamed_addr {
 entry:
   %r1 = fdiv float %a, %b
-  store volatile float %r1, float addrspace(1)* %out
+  store volatile float %r1, ptr addrspace(1) %out
   %r2 = fdiv nnan float %a, %b
-  store volatile float %r2, float addrspace(1)* %out
+  store volatile float %r2, ptr addrspace(1) %out
   %r3 = fdiv ninf float %a, %b
-  store volatile float %r3, float addrspace(1)* %out
+  store volatile float %r3, ptr addrspace(1) %out
   %r4 = fdiv nsz float %a, %b
-  store volatile float %r4, float addrspace(1)* %out
+  store volatile float %r4, ptr addrspace(1) %out
   %r5 = fdiv arcp float %a, %b
-  store volatile float %r5, float addrspace(1)* %out
+  store volatile float %r5, ptr addrspace(1) %out
   %r6 = fdiv fast float %a, %b
-  store volatile float %r6, float addrspace(1)* %out
+  store volatile float %r6, ptr addrspace(1) %out
   %r7 = fdiv nnan ninf float %a, %b
-  store volatile float %r7, float addrspace(1)* %out
+  store volatile float %r7, ptr addrspace(1) %out
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll b/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll
index 4745124802d96..a48678a61cb04 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fmul.ll
@@ -17,21 +17,21 @@
 ; CHECK-SPIRV:     %[[#r6]] = OpFMul %[[#float]]
 ; CHECK-SPIRV:     %[[#r7]] = OpFMul %[[#float]]
 
-define spir_kernel void @testFMul(float %a, float %b, float addrspace(1)* %out) local_unnamed_addr {
+define spir_kernel void @testFMul(float %a, float %b, ptr addrspace(1) %out) local_unnamed_addr {
 entry:
   %r1 = fmul float %a, %b
-  store volatile float %r1, float addrspace(1)* %out
+  store volatile float %r1, ptr addrspace(1) %out
   %r2 = fmul nnan float %a, %b
-  store volatile float %r2, float addrspace(1)* %out
+  store volatile float %r2, ptr addrspace(1) %out
   %r3 = fmul ninf float %a, %b
-  store volatile float %r3, float addrspace(1)* %out
+  store volatile float %r3, ptr addrspace(1) %out
   %r4 = fmul nsz float %a, %b
-  store volatile float %r4, float addrspace(1)* %out
+  store volatile float %r4, ptr addrspace(1) %out
   %r5 = fmul arcp float %a, %b
-  store volatile float %r5, float addrspace(1)* %out
+  store volatile float %r5, ptr addrspace(1) %out
   %r6 = fmul fast float %a, %b
-  store volatile float %r6, float addrspace(1)* %out
+  store volatile float %r6, ptr addrspace(1) %out
   %r7 = fmul nnan ninf float %a, %b
-  store volatile float %r7, float addrspace(1)* %out
+  store volatile float %r7, ptr addrspace(1) %out
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll b/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll
index 21947517694f2..107bb209823cd 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fneg.ll
@@ -11,21 +11,21 @@
 ; CHECK-SPIRV: %[[#r6:]] = OpFNegate %[[#float]]
 ; CHECK-SPIRV: %[[#r7:]] = OpFNegate %[[#float]]
 
-define spir_kernel void @testFNeg(float %a, float addrspace(1)* %out) local_unnamed_addr {
+define spir_kernel void @testFNeg(float %a, ptr addrspace(1) %out) local_unnamed_addr {
 entry:
   %r1 = fneg float %a
-  store volatile float %r1, float addrspace(1)* %out
+  store volatile float %r1, ptr addrspace(1) %out
   %r2 = fneg nnan float %a
-  store volatile float %r2, float addrspace(1)* %out
+  store volatile float %r2, ptr addrspace(1) %out
   %r3 = fneg ninf float %a
-  store volatile float %r3, float addrspace(1)* %out
+  store volatile float %r3, ptr addrspace(1) %out
   %r4 = fneg nsz float %a
-  store volatile float %r4, float addrspace(1)* %out
+  store volatile float %r4, ptr addrspace(1) %out
   %r5 = fneg arcp float %a
-  store volatile float %r5, float addrspace(1)* %out
+  store volatile float %r5, ptr addrspace(1) %out
   %r6 = fneg fast float %a
-  store volatile float %r6, float addrspace(1)* %out
+  store volatile float %r6, ptr addrspace(1) %out
   %r7 = fneg nnan ninf float %a
-  store volatile float %r7, float addrspace(1)* %out
+  store volatile float %r7, ptr addrspace(1) %out
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll b/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll
index 307fc1e49ecbc..8582a79f5042b 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fp_contract_reassoc_fast_mode.ll
@@ -9,15 +9,15 @@ define spir_kernel void @test(float %a, float %b) {
 entry:
   %a.addr = alloca float, align 4
   %b.addr = alloca float, align 4
-  store float %a, float* %a.addr, align 4
-  store float %b, float* %b.addr, align 4
-  %0 = load float, float* %a.addr, align 4
-  %1 = load float, float* %a.addr, align 4
+  store float %a, ptr %a.addr, align 4
+  store float %b, ptr %b.addr, align 4
+  %0 = load float, ptr %a.addr, align 4
+  %1 = load float, ptr %a.addr, align 4
   %mul = fmul contract float %0, %1
-  store float %mul, float* %b.addr, align 4
-  %2 = load float, float* %b.addr, align 4
-  %3 = load float, float* %b.addr, align 4
+  store float %mul, ptr %b.addr, align 4
+  %2 = load float, ptr %b.addr, align 4
+  %3 = load float, ptr %b.addr, align 4
   %sub = fsub reassoc float %2, %3
-  store float %sub, float* %b.addr, align 4
+  store float %sub, ptr %b.addr, align 4
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/frem.ll b/llvm/test/CodeGen/SPIRV/transcoding/frem.ll
index f07a3a2d6f075..3c4d33a8f8798 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/frem.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/frem.ll
@@ -17,21 +17,21 @@
 ; CHECK-SPIRV:     %[[#r6]] = OpFRem %[[#float]]
 ; CHECK-SPIRV:     %[[#r7]] = OpFRem %[[#float]]
 
-define spir_kernel void @testFRem(float %a, float %b, float addrspace(1)* %out) local_unnamed_addr {
+define spir_kernel void @testFRem(float %a, float %b, ptr addrspace(1) %out) local_unnamed_addr {
 entry:
   %r1 = frem float %a, %b
-  store volatile float %r1, float addrspace(1)* %out
+  store volatile float %r1, ptr addrspace(1) %out
   %r2 = frem nnan float %a, %b
-  store volatile float %r2, float addrspace(1)* %out
+  store volatile float %r2, ptr addrspace(1) %out
   %r3 = frem ninf float %a, %b
-  store volatile float %r3, float addrspace(1)* %out
+  store volatile float %r3, ptr addrspace(1) %out
   %r4 = frem nsz float %a, %b
-  store volatile float %r4, float addrspace(1)* %out
+  store volatile float %r4, ptr addrspace(1) %out
   %r5 = frem arcp float %a, %b
-  store volatile float %r5, float addrspace(1)* %out
+  store volatile float %r5, ptr addrspace(1) %out
   %r6 = frem fast float %a, %b
-  store volatile float %r6, float addrspace(1)* %out
+  store volatile float %r6, ptr addrspace(1) %out
   %r7 = frem nnan ninf float %a, %b
-  store volatile float %r7, float addrspace(1)* %out
+  store volatile float %r7, ptr addrspace(1) %out
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll b/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll
index 3f980b1646142..72ccb0b0986f8 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/fsub.ll
@@ -18,21 +18,21 @@
 ; CHECK-SPIRV:     %[[#r6]] = OpFSub %[[#float]]
 ; CHECK-SPIRV:     %[[#r7]] = OpFSub %[[#float]]
 
-define spir_kernel void @testFSub(float %a, float %b, float addrspace(1)* %out) local_unnamed_addr {
+define spir_kernel void @testFSub(float %a, float %b, ptr addrspace(1) %out) local_unnamed_addr {
 entry:
   %r1 = fsub float %a, %b
-  store volatile float %r1, float addrspace(1)* %out
+  store volatile float %r1, ptr addrspace(1) %out
   %r2 = fsub nnan float %a, %b
-  store volatile float %r2, float addrspace(1)* %out
+  store volatile float %r2, ptr addrspace(1) %out
   %r3 = fsub ninf float %a, %b
-  store volatile float %r3, float addrspace(1)* %out
+  store volatile float %r3, ptr addrspace(1) %out
   %r4 = fsub nsz float %a, %b
-  store volatile float %r4, float addrspace(1)* %out
+  store volatile float %r4, ptr addrspace(1) %out
   %r5 = fsub arcp float %a, %b
-  store volatile float %r5, float addrspace(1)* %out
+  store volatile float %r5, ptr addrspace(1) %out
   %r6 = fsub fast float %a, %b
-  store volatile float %r6, float addrspace(1)* %out
+  store volatile float %r6, ptr addrspace(1) %out
   %r7 = fsub nnan ninf float %a, %b
-  store volatile float %r7, float addrspace(1)* %out
+  store volatile float %r7, ptr addrspace(1) %out
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll b/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll
index ff1bec4497ba2..0c605548555f5 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/global_block.ll
@@ -30,31 +30,31 @@
 ; CHECK-SPIRV-NEXT: %[[#]] = OpFunctionParameter %[[#int8Ptr]]
 ; CHECK-SPIRV-NEXT: %[[#]] = OpFunctionParameter %[[#int]]
 
-%struct.__opencl_block_literal_generic = type { i32, i32, i8 addrspace(4)* }
+%struct.__opencl_block_literal_generic = type { i32, i32, ptr addrspace(4) }
 
- at block_kernel.b1 = internal addrspace(2) constant %struct.__opencl_block_literal_generic addrspace(4)* addrspacecast (%struct.__opencl_block_literal_generic addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global to %struct.__opencl_block_literal_generic addrspace(1)*) to %struct.__opencl_block_literal_generic addrspace(4)*), align 4
- at __block_literal_global = internal addrspace(1) constant { i32, i32, i8 addrspace(4)* } { i32 12, i32 4, i8 addrspace(4)* addrspacecast (i8* bitcast (i32 (i8 addrspace(4)*, i32)* @_block_invoke to i8*) to i8 addrspace(4)*) }, align 4
+ at block_kernel.b1 = internal addrspace(2) constant ptr addrspace(4) addrspacecast (ptr addrspace(1) @__block_literal_global to ptr addrspace(4)), align 4
+ at __block_literal_global = internal addrspace(1) constant { i32, i32, ptr addrspace(4) } { i32 12, i32 4, ptr addrspace(4) addrspacecast (ptr @_block_invoke to ptr addrspace(4)) }, align 4
 
-define dso_local spir_kernel void @block_kernel(i32 addrspace(1)* noundef %res) {
+define dso_local spir_kernel void @block_kernel(ptr addrspace(1) noundef %res) {
 entry:
-  %res.addr = alloca i32 addrspace(1)*, align 4
-  store i32 addrspace(1)* %res, i32 addrspace(1)** %res.addr, align 4
-  %call = call spir_func i32 @_block_invoke(i8 addrspace(4)* noundef addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global to i8 addrspace(1)*) to i8 addrspace(4)*), i32 noundef 5)
-  %0 = load i32 addrspace(1)*, i32 addrspace(1)** %res.addr, align 4
-  store i32 %call, i32 addrspace(1)* %0, align 4
+  %res.addr = alloca ptr addrspace(1), align 4
+  store ptr addrspace(1) %res, ptr %res.addr, align 4
+  %call = call spir_func i32 @_block_invoke(ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @__block_literal_global to ptr addrspace(4)), i32 noundef 5)
+  %0 = load ptr addrspace(1), ptr %res.addr, align 4
+  store i32 %call, ptr addrspace(1) %0, align 4
   ret void
 }
 
-define internal spir_func i32 @_block_invoke(i8 addrspace(4)* noundef %.block_descriptor, i32 noundef %i) #0 {
+define internal spir_func i32 @_block_invoke(ptr addrspace(4) noundef %.block_descriptor, i32 noundef %i) #0 {
 entry:
-  %.block_descriptor.addr = alloca i8 addrspace(4)*, align 4
+  %.block_descriptor.addr = alloca ptr addrspace(4), align 4
   %i.addr = alloca i32, align 4
-  %block.addr = alloca <{ i32, i32, i8 addrspace(4)* }> addrspace(4)*, align 4
-  store i8 addrspace(4)* %.block_descriptor, i8 addrspace(4)** %.block_descriptor.addr, align 4
-  %block = bitcast i8 addrspace(4)* %.block_descriptor to <{ i32, i32, i8 addrspace(4)* }> addrspace(4)*
-  store i32 %i, i32* %i.addr, align 4
-  store <{ i32, i32, i8 addrspace(4)* }> addrspace(4)* %block, <{ i32, i32, i8 addrspace(4)* }> addrspace(4)** %block.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  %block.addr = alloca ptr addrspace(4), align 4
+  store ptr addrspace(4) %.block_descriptor, ptr %.block_descriptor.addr, align 4
+  %block = bitcast ptr addrspace(4) %.block_descriptor to ptr addrspace(4)
+  store i32 %i, ptr %i.addr, align 4
+  store ptr addrspace(4) %block, ptr %block.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   %add = add nsw i32 %0, 1
   ret i32 %add
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll b/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll
index e654836dbdaf1..c63750ebd8b84 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/group_ops.ll
@@ -20,10 +20,10 @@
 ;;   res[0] = work_group_reduce_max(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupFMax(float noundef %a, float addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupFMax(float noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func float @_Z21work_group_reduce_maxf(float noundef %a)
-  store float %call, float addrspace(1)* %res, align 4
+  store float %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -37,10 +37,10 @@ declare spir_func float @_Z21work_group_reduce_maxf(float noundef) local_unnamed
 ;;   res[0] = work_group_reduce_min(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupFMin(float noundef %a, float addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupFMin(float noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func float @_Z21work_group_reduce_minf(float noundef %a)
-  store float %call, float addrspace(1)* %res, align 4
+  store float %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -54,10 +54,10 @@ declare spir_func float @_Z21work_group_reduce_minf(float noundef) local_unnamed
 ;;   res[0] = work_group_reduce_add(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupFAdd(float noundef %a, float addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupFAdd(float noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func float @_Z21work_group_reduce_addf(float noundef %a)
-  store float %call, float addrspace(1)* %res, align 4
+  store float %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -71,10 +71,10 @@ declare spir_func float @_Z21work_group_reduce_addf(float noundef) local_unnamed
 ;;   res[0] = work_group_scan_inclusive_max(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupScanInclusiveFMax(float noundef %a, float addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupScanInclusiveFMax(float noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func float @_Z29work_group_scan_inclusive_maxf(float noundef %a)
-  store float %call, float addrspace(1)* %res, align 4
+  store float %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -88,10 +88,10 @@ declare spir_func float @_Z29work_group_scan_inclusive_maxf(float noundef) local
 ;;   res[0] = work_group_scan_exclusive_max(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupScanExclusiveFMax(float noundef %a, float addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupScanExclusiveFMax(float noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func float @_Z29work_group_scan_exclusive_maxf(float noundef %a)
-  store float %call, float addrspace(1)* %res, align 4
+  store float %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -105,10 +105,10 @@ declare spir_func float @_Z29work_group_scan_exclusive_maxf(float noundef) local
 ;;   res[0] = work_group_reduce_max(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupSMax(i32 noundef %a, i32 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupSMax(i32 noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func i32 @_Z21work_group_reduce_maxi(i32 noundef %a)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -122,10 +122,10 @@ declare spir_func i32 @_Z21work_group_reduce_maxi(i32 noundef) local_unnamed_add
 ;;   res[0] = work_group_reduce_min(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupSMin(i32 noundef %a, i32 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupSMin(i32 noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func i32 @_Z21work_group_reduce_mini(i32 noundef %a)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -139,10 +139,10 @@ declare spir_func i32 @_Z21work_group_reduce_mini(i32 noundef) local_unnamed_add
 ;;   res[0] = work_group_reduce_add(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupIAddSigned(i32 noundef %a, i32 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupIAddSigned(i32 noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func i32 @_Z21work_group_reduce_addi(i32 noundef %a)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -156,10 +156,10 @@ declare spir_func i32 @_Z21work_group_reduce_addi(i32 noundef) local_unnamed_add
 ;;   res[0] = work_group_reduce_add(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupIAddUnsigned(i32 noundef %a, i32 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupIAddUnsigned(i32 noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func i32 @_Z21work_group_reduce_addj(i32 noundef %a)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -173,10 +173,10 @@ declare spir_func i32 @_Z21work_group_reduce_addj(i32 noundef) local_unnamed_add
 ;;   res[0] = work_group_reduce_max(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupUMax(i32 noundef %a, i32 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupUMax(i32 noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func i32 @_Z21work_group_reduce_maxj(i32 noundef %a)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -192,10 +192,10 @@ declare spir_func i32 @_Z21work_group_reduce_maxj(i32 noundef) local_unnamed_add
 ;; }
 ;; #pragma OPENCL EXTENSION cl_khr_subgroups: disable
 
-define dso_local spir_kernel void @testSubGroupUMax(i32 noundef %a, i32 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testSubGroupUMax(i32 noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func i32 @_Z20sub_group_reduce_maxj(i32 noundef %a)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -209,10 +209,10 @@ declare spir_func i32 @_Z20sub_group_reduce_maxj(i32 noundef) local_unnamed_addr
 ;;   res[0] = work_group_scan_inclusive_max(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupScanInclusiveUMax(i32 noundef %a, i32 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupScanInclusiveUMax(i32 noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func i32 @_Z29work_group_scan_inclusive_maxj(i32 noundef %a)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -226,10 +226,10 @@ declare spir_func i32 @_Z29work_group_scan_inclusive_maxj(i32 noundef) local_unn
 ;;   res[0] = work_group_scan_exclusive_max(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupScanExclusiveUMax(i32 noundef %a, i32 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupScanExclusiveUMax(i32 noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func i32 @_Z29work_group_scan_exclusive_maxj(i32 noundef %a)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -243,10 +243,10 @@ declare spir_func i32 @_Z29work_group_scan_exclusive_maxj(i32 noundef) local_unn
 ;;   res[0] = work_group_reduce_min(a);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupUMin(i32 noundef %a, i32 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupUMin(i32 noundef %a, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
   %call = call spir_func i32 @_Z21work_group_reduce_minj(i32 noundef %a)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   ret void
 }
 
@@ -265,13 +265,13 @@ declare spir_func i32 @_Z21work_group_reduce_minj(i32 noundef) local_unnamed_add
 ;;   res[0] = work_group_broadcast(a, *id);
 ;; }
 
-define dso_local spir_kernel void @testWorkGroupBroadcast(i32 noundef %a, i32 addrspace(1)* nocapture noundef readonly %id, i32 addrspace(1)* nocapture noundef writeonly %res) local_unnamed_addr {
+define dso_local spir_kernel void @testWorkGroupBroadcast(i32 noundef %a, ptr addrspace(1) nocapture noundef readonly %id, ptr addrspace(1) nocapture noundef writeonly %res) local_unnamed_addr {
 entry:
-  %0 = load i32, i32 addrspace(1)* %id, align 4
+  %0 = load i32, ptr addrspace(1) %id, align 4
   %call = call spir_func i32 @_Z20work_group_broadcastjj(i32 noundef %a, i32 noundef %0)
   %call_v2 = call spir_func i32 @_Z20work_group_broadcastjj(i32 noundef %a, i32 noundef %0, i32 noundef %0)
   %call_v3 = call spir_func i32 @_Z20work_group_broadcastjj(i32 noundef %a, i32 noundef %0, i32 noundef %0, i32 noundef %0)
-  store i32 %call, i32 addrspace(1)* %res, align 4
+  store i32 %call, ptr addrspace(1) %res, align 4
   %call1 = call spir_func i32 @__spirv_GroupBroadcast(i32 0, i32 noundef %a, i32 noundef %0)
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll b/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll
index c5f3f9e1e2e74..d7378b1f335e9 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/isequal.ll
@@ -3,18 +3,18 @@
 
 ; CHECK-SPIRV-NOT: OpSConvert
 
-define spir_kernel void @math_kernel8(<8 x i32> addrspace(1)* nocapture %out, <8 x float> addrspace(1)* nocapture readonly %in1, <8 x float> addrspace(1)* nocapture readonly %in2) {
+define spir_kernel void @math_kernel8(ptr addrspace(1) nocapture %out, ptr addrspace(1) nocapture readonly %in1, ptr addrspace(1) nocapture readonly %in2) {
 entry:
   %call = tail call spir_func i64 @_Z13get_global_idj(i32 0)
   %sext = shl i64 %call, 32
   %idxprom = ashr exact i64 %sext, 32
-  %arrayidx = getelementptr inbounds <8 x float>, <8 x float> addrspace(1)* %in1, i64 %idxprom
-  %0 = load <8 x float>, <8 x float> addrspace(1)* %arrayidx, align 32
-  %arrayidx2 = getelementptr inbounds <8 x float>, <8 x float> addrspace(1)* %in2, i64 %idxprom
-  %1 = load <8 x float>, <8 x float> addrspace(1)* %arrayidx2, align 32
+  %arrayidx = getelementptr inbounds <8 x float>, ptr addrspace(1) %in1, i64 %idxprom
+  %0 = load <8 x float>, ptr addrspace(1) %arrayidx, align 32
+  %arrayidx2 = getelementptr inbounds <8 x float>, ptr addrspace(1) %in2, i64 %idxprom
+  %1 = load <8 x float>, ptr addrspace(1) %arrayidx2, align 32
   %call3 = tail call spir_func <8 x i32> @_Z7isequalDv8_fDv8_f(<8 x float> %0, <8 x float> %1)
-  %arrayidx5 = getelementptr inbounds <8 x i32>, <8 x i32> addrspace(1)* %out, i64 %idxprom
-  store <8 x i32> %call3, <8 x i32> addrspace(1)* %arrayidx5, align 32
+  %arrayidx5 = getelementptr inbounds <8 x i32>, ptr addrspace(1) %out, i64 %idxprom
+  store <8 x i32> %call3, ptr addrspace(1) %arrayidx5, align 32
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/ldexp.ll b/llvm/test/CodeGen/SPIRV/transcoding/ldexp.ll
index d142590282520..f1625e7665ad7 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/ldexp.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/ldexp.ll
@@ -11,12 +11,12 @@
 
 ; CHECK-SPIRV: %{{.*}} ldexp
 
-define dso_local spir_kernel void @test_kernel_half(<3 x half> noundef %x, i32 noundef %k, <3 x half> addrspace(1)* nocapture noundef writeonly %ret) local_unnamed_addr {
+define dso_local spir_kernel void @test_kernel_half(<3 x half> noundef %x, i32 noundef %k, ptr addrspace(1) nocapture noundef writeonly %ret) local_unnamed_addr {
 entry:
   %call = call spir_func <3 x half> @_Z5ldexpDv3_Dhi(<3 x half> noundef %x, i32 noundef %k)
   %extractVec2 = shufflevector <3 x half> %call, <3 x half> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
-  %storetmp3 = bitcast <3 x half> addrspace(1)* %ret to <4 x half> addrspace(1)*
-  store <4 x half> %extractVec2, <4 x half> addrspace(1)* %storetmp3, align 8
+  %storetmp3 = bitcast ptr addrspace(1) %ret to ptr addrspace(1)
+  store <4 x half> %extractVec2, ptr addrspace(1) %storetmp3, align 8
   ret void
 }
 
@@ -28,12 +28,12 @@ declare spir_func <3 x half> @_Z5ldexpDv3_Dhi(<3 x half> noundef, i32 noundef) l
 
 ; CHECK-SPIRV: %{{.*}} ldexp
 
-define dso_local spir_kernel void @test_kernel_float(<3 x float> noundef %x, i32 noundef %k, <3 x float> addrspace(1)* nocapture noundef writeonly %ret) local_unnamed_addr {
+define dso_local spir_kernel void @test_kernel_float(<3 x float> noundef %x, i32 noundef %k, ptr addrspace(1) nocapture noundef writeonly %ret) local_unnamed_addr {
 entry:
   %call = call spir_func <3 x float> @_Z5ldexpDv3_fi(<3 x float> noundef %x, i32 noundef %k)
   %extractVec2 = shufflevector <3 x float> %call, <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
-  %storetmp3 = bitcast <3 x float> addrspace(1)* %ret to <4 x float> addrspace(1)*
-  store <4 x float> %extractVec2, <4 x float> addrspace(1)* %storetmp3, align 16
+  %storetmp3 = bitcast ptr addrspace(1) %ret to ptr addrspace(1)
+  store <4 x float> %extractVec2, ptr addrspace(1) %storetmp3, align 16
   ret void
 }
 
@@ -45,12 +45,12 @@ declare spir_func <3 x float> @_Z5ldexpDv3_fi(<3 x float> noundef, i32 noundef)
 
 ; CHECK-SPIRV: %{{.*}} ldexp
 
-define dso_local spir_kernel void @test_kernel_double(<3 x double> noundef %x, i32 noundef %k, <3 x double> addrspace(1)* nocapture noundef writeonly %ret) local_unnamed_addr {
+define dso_local spir_kernel void @test_kernel_double(<3 x double> noundef %x, i32 noundef %k, ptr addrspace(1) nocapture noundef writeonly %ret) local_unnamed_addr {
 entry:
   %call = call spir_func <3 x double> @_Z5ldexpDv3_di(<3 x double> noundef %x, i32 noundef %k)
   %extractVec2 = shufflevector <3 x double> %call, <3 x double> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
-  %storetmp3 = bitcast <3 x double> addrspace(1)* %ret to <4 x double> addrspace(1)*
-  store <4 x double> %extractVec2, <4 x double> addrspace(1)* %storetmp3, align 32
+  %storetmp3 = bitcast ptr addrspace(1) %ret to ptr addrspace(1)
+  store <4 x double> %extractVec2, ptr addrspace(1) %storetmp3, align 32
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/memory_access.ll b/llvm/test/CodeGen/SPIRV/transcoding/memory_access.ll
index fc757112f168c..3d7e5350848d4 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/memory_access.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/memory_access.ll
@@ -16,23 +16,23 @@
 ; CHECK-SPIRV-NOT: OpStore %[[#]] %[[#]] Aligned 0
 ; CHECK-SPIRV:     OpStore %[[#]] %[[#]]
 
-define spir_kernel void @test_load_store(i32 addrspace(1)* %destMemory, i32 addrspace(1)* %oldValues, i32 %newValue) {
+define spir_kernel void @test_load_store(ptr addrspace(1) %destMemory, ptr addrspace(1) %oldValues, i32 %newValue) {
 entry:
-  %ptr = alloca i32 addrspace(4)*, align 8
-  %0 = addrspacecast i32 addrspace(1)* %oldValues to i32 addrspace(4)*
-  store volatile i32 addrspace(4)* %0, i32 addrspace(4)** %ptr, align 8
-  %1 = load volatile i32 addrspace(4)*, i32 addrspace(4)** %ptr, align 8
-  %2 = load i32, i32 addrspace(4)* %1, align 4
-  %call = call spir_func i32 @_Z14atomic_cmpxchgPVU3AS1iii(i32 addrspace(1)* %destMemory, i32 %2, i32 %newValue)
-  %3 = load volatile i32 addrspace(4)*, i32 addrspace(4)** %ptr, align 8
-  %4 = load volatile i32 addrspace(4)*, i32 addrspace(4)** %ptr
-  %5 = load volatile i32 addrspace(4)*, i32 addrspace(4)** %ptr, align 8, !nontemporal !9
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(4)* %3, i64 0
-  store i32 %call, i32 addrspace(4)* %arrayidx, align 4, !nontemporal !9
-  store i32 addrspace(4)* %5, i32 addrspace(4)** %ptr
+  %ptr = alloca ptr addrspace(4), align 8
+  %0 = addrspacecast ptr addrspace(1) %oldValues to ptr addrspace(4)
+  store volatile ptr addrspace(4) %0, ptr %ptr, align 8
+  %1 = load volatile ptr addrspace(4), ptr %ptr, align 8
+  %2 = load i32, ptr addrspace(4) %1, align 4
+  %call = call spir_func i32 @_Z14atomic_cmpxchgPVU3AS1iii(ptr addrspace(1) %destMemory, i32 %2, i32 %newValue)
+  %3 = load volatile ptr addrspace(4), ptr %ptr, align 8
+  %4 = load volatile ptr addrspace(4), ptr %ptr
+  %5 = load volatile ptr addrspace(4), ptr %ptr, align 8, !nontemporal !9
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(4) %3, i64 0
+  store i32 %call, ptr addrspace(4) %arrayidx, align 4, !nontemporal !9
+  store ptr addrspace(4) %5, ptr %ptr
   ret void
 }
 
-declare spir_func i32 @_Z14atomic_cmpxchgPVU3AS1iii(i32 addrspace(1)*, i32, i32)
+declare spir_func i32 @_Z14atomic_cmpxchgPVU3AS1iii(ptr addrspace(1), i32, i32)
 
 !9 = !{i32 1}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/readonly.ll b/llvm/test/CodeGen/SPIRV/transcoding/readonly.ll
index 051d80319ee04..13513bdecd3a1 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/readonly.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/readonly.ll
@@ -3,7 +3,7 @@
 ; CHECK-SPIRV: OpDecorate %[[#PARAM:]] FuncParamAttr NoWrite
 ; CHECK-SPIRV: %[[#PARAM]] = OpFunctionParameter %{{.*}}
 
-define dso_local spir_kernel void @_ZTSZ4mainE15kernel_function(i32 addrspace(1)* readonly %_arg_) local_unnamed_addr {
+define dso_local spir_kernel void @_ZTSZ4mainE15kernel_function(ptr addrspace(1) readonly %_arg_) local_unnamed_addr {
 entry:
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll b/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll
index de7673ad7f17e..188fc6f4ecd97 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/relationals_double.ll
@@ -39,7 +39,7 @@
 ; CHECK-SPIRV: OpOrdered %[[#BoolVectorTypeID]]
 ; CHECK-SPIRV: OpUnordered %[[#BoolVectorTypeID]]
 
-define dso_local spir_func void @test_scalar(i32 addrspace(4)* nocapture writeonly %out, double %d) local_unnamed_addr {
+define dso_local spir_func void @test_scalar(ptr addrspace(4) nocapture writeonly %out, double %d) local_unnamed_addr {
 entry:
   %call = tail call spir_func i32 @_Z8isfinited(double %d)
   %call1 = tail call spir_func i32 @_Z5isinfd(double %d)
@@ -68,7 +68,7 @@ entry:
   %add23 = add nsw i32 %add21, %call22
   %call24 = tail call spir_func i32 @_Z11isunordereddd(double %d, double %d)
   %add25 = add nsw i32 %add23, %call24
-  store i32 %add25, i32 addrspace(4)* %out, align 4
+  store i32 %add25, ptr addrspace(4) %out, align 4
   ret void
 }
 
@@ -100,7 +100,7 @@ declare spir_func i32 @_Z9isordereddd(double, double) local_unnamed_addr
 
 declare spir_func i32 @_Z11isunordereddd(double, double) local_unnamed_addr
 
-define dso_local spir_func void @test_vector(<2 x i64> addrspace(4)* nocapture writeonly %out, <2 x double> %d) local_unnamed_addr {
+define dso_local spir_func void @test_vector(ptr addrspace(4) nocapture writeonly %out, <2 x double> %d) local_unnamed_addr {
 entry:
   %call = tail call spir_func <2 x i64> @_Z8isfiniteDv2_d(<2 x double> %d)
   %call1 = tail call spir_func <2 x i64> @_Z5isinfDv2_d(<2 x double> %d)
@@ -129,7 +129,7 @@ entry:
   %add23 = add <2 x i64> %add21, %call22
   %call24 = tail call spir_func <2 x i64> @_Z11isunorderedDv2_dS_(<2 x double> %d, <2 x double> %d)
   %add25 = add <2 x i64> %add23, %call24
-  store <2 x i64> %add25, <2 x i64> addrspace(4)* %out, align 16
+  store <2 x i64> %add25, ptr addrspace(4) %out, align 16
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll b/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll
index 69a4a30fd65ef..be4e27cf55283 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/relationals_float.ll
@@ -39,7 +39,7 @@
 ; CHECK-SPIRV: OpOrdered %[[#BoolVectorTypeID]]
 ; CHECK-SPIRV: OpUnordered %[[#BoolVectorTypeID]]
 
-define dso_local spir_func void @test_scalar(i32 addrspace(4)* nocapture writeonly %out, float %f) local_unnamed_addr {
+define dso_local spir_func void @test_scalar(ptr addrspace(4) nocapture writeonly %out, float %f) local_unnamed_addr {
 entry:
   %call = tail call spir_func i32 @_Z8isfinitef(float %f)
   %call1 = tail call spir_func i32 @_Z5isinff(float %f)
@@ -68,7 +68,7 @@ entry:
   %add23 = add nsw i32 %add21, %call22
   %call24 = tail call spir_func i32 @_Z11isunorderedff(float %f, float %f)
   %add25 = add nsw i32 %add23, %call24
-  store i32 %add25, i32 addrspace(4)* %out, align 4
+  store i32 %add25, ptr addrspace(4) %out, align 4
   ret void
 }
 
@@ -100,7 +100,7 @@ declare spir_func i32 @_Z9isorderedff(float, float) local_unnamed_addr
 
 declare spir_func i32 @_Z11isunorderedff(float, float) local_unnamed_addr
 
-define dso_local spir_func void @test_vector(<2 x i32> addrspace(4)* nocapture writeonly %out, <2 x float> %f) local_unnamed_addr {
+define dso_local spir_func void @test_vector(ptr addrspace(4) nocapture writeonly %out, <2 x float> %f) local_unnamed_addr {
 entry:
   %call = tail call spir_func <2 x i32> @_Z8isfiniteDv2_f(<2 x float> %f)
   %call1 = tail call spir_func <2 x i32> @_Z5isinfDv2_f(<2 x float> %f)
@@ -129,7 +129,7 @@ entry:
   %add23 = add <2 x i32> %add21, %call22
   %call24 = tail call spir_func <2 x i32> @_Z11isunorderedDv2_fS_(<2 x float> %f, <2 x float> %f)
   %add25 = add <2 x i32> %add23, %call24
-  store <2 x i32> %add25, <2 x i32> addrspace(4)* %out, align 8
+  store <2 x i32> %add25, ptr addrspace(4) %out, align 8
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll b/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll
index d6a7fda41afd0..b5637ad9819fe 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/relationals_half.ll
@@ -38,7 +38,7 @@
 ; CHECK-SPIRV: OpOrdered %[[#BoolVectorTypeID]]
 ; CHECK-SPIRV: OpUnordered %[[#BoolVectorTypeID]]
 
-define dso_local spir_func void @test_scalar(i32 addrspace(4)* nocapture writeonly %out, half %h) local_unnamed_addr {
+define dso_local spir_func void @test_scalar(ptr addrspace(4) nocapture writeonly %out, half %h) local_unnamed_addr {
 entry:
   %call = tail call spir_func i32 @_Z8isfiniteDh(half %h)
   %call1 = tail call spir_func i32 @_Z5isinfDh(half %h)
@@ -67,7 +67,7 @@ entry:
   %add23 = add nsw i32 %add21, %call22
   %call24 = tail call spir_func i32 @_Z11isunorderedDhDh(half %h, half %h)
   %add25 = add nsw i32 %add23, %call24
-  store i32 %add25, i32 addrspace(4)* %out, align 4
+  store i32 %add25, ptr addrspace(4) %out, align 4
   ret void
 }
 
@@ -99,7 +99,7 @@ declare spir_func i32 @_Z9isorderedDhDh(half, half) local_unnamed_addr
 
 declare spir_func i32 @_Z11isunorderedDhDh(half, half) local_unnamed_addr
 
-define dso_local spir_func void @test_vector(<2 x i16> addrspace(4)* nocapture writeonly %out, <2 x half> %h) local_unnamed_addr {
+define dso_local spir_func void @test_vector(ptr addrspace(4) nocapture writeonly %out, <2 x half> %h) local_unnamed_addr {
 entry:
   %call = tail call spir_func <2 x i16> @_Z8isfiniteDv2_Dh(<2 x half> %h)
   %call1 = tail call spir_func <2 x i16> @_Z5isinfDv2_Dh(<2 x half> %h)
@@ -128,7 +128,7 @@ entry:
   %add23 = add <2 x i16> %add21, %call22
   %call24 = tail call spir_func <2 x i16> @_Z11isunorderedDv2_DhS_(<2 x half> %h, <2 x half> %h)
   %add25 = add <2 x i16> %add23, %call24
-  store <2 x i16> %add25, <2 x i16> addrspace(4)* %out, align 4
+  store <2 x i16> %add25, ptr addrspace(4) %out, align 4
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spec_const.ll b/llvm/test/CodeGen/SPIRV/transcoding/spec_const.ll
index 8ce76534c50db..2c8c0efe77597 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/spec_const.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spec_const.ll
@@ -23,32 +23,32 @@
 ; CHECK-SPIRV-DAG: %[[#SC6]] = OpSpecConstant %[[#]] 1067450368
 ; CHECK-SPIRV-DAG: %[[#SC7]] = OpSpecConstant %[[#]] 0 1073807360
 
-define spir_kernel void @foo(i8 addrspace(1)* nocapture %b, i8 addrspace(1)* nocapture %c, i16 addrspace(1)* nocapture %s, i32 addrspace(1)* nocapture %i, i64 addrspace(1)* nocapture %l, half addrspace(1)* nocapture %h, float addrspace(1)* nocapture %f, double addrspace(1)* nocapture %d) local_unnamed_addr {
+define spir_kernel void @foo(ptr addrspace(1) nocapture %b, ptr addrspace(1) nocapture %c, ptr addrspace(1) nocapture %s, ptr addrspace(1) nocapture %i, ptr addrspace(1) nocapture %l, ptr addrspace(1) nocapture %h, ptr addrspace(1) nocapture %f, ptr addrspace(1) nocapture %d) local_unnamed_addr {
 entry:
   %0 = call i1 @_Z20__spirv_SpecConstantib(i32 0, i1 false)
   %conv = zext i1 %0 to i8
-  store i8 %conv, i8 addrspace(1)* %b, align 1
+  store i8 %conv, ptr addrspace(1) %b, align 1
 
   %1 = call i8 @_Z20__spirv_SpecConstantia(i32 1, i8 100)
-  store i8 %1, i8 addrspace(1)* %c, align 1
+  store i8 %1, ptr addrspace(1) %c, align 1
 
   %2 = call i16 @_Z20__spirv_SpecConstantis(i32 2, i16 1)
-  store i16 %2, i16 addrspace(1)* %s, align 2
+  store i16 %2, ptr addrspace(1) %s, align 2
 
   %3 = call i32 @_Z20__spirv_SpecConstantii(i32 3, i32 2)
-  store i32 %3, i32 addrspace(1)* %i, align 4
+  store i32 %3, ptr addrspace(1) %i, align 4
 
   %4 = call i64 @_Z20__spirv_SpecConstantix(i32 4, i64 3)
-  store i64 %4, i64 addrspace(1)* %l, align 8
+  store i64 %4, ptr addrspace(1) %l, align 8
 
   %5 = call half @_Z20__spirv_SpecConstantih(i32 5, half 0xH3800)
-  store half %5, half addrspace(1)* %h, align 2
+  store half %5, ptr addrspace(1) %h, align 2
 
   %6 = call float @_Z20__spirv_SpecConstantif(i32 6, float 1.250000e+00)
-  store float %6, float addrspace(1)* %f, align 4
+  store float %6, ptr addrspace(1) %f, align 4
 
   %7 = call double @_Z20__spirv_SpecConstantid(i32 7, double 2.125000e+00)
-  store double %7, double addrspace(1)* %d, align 8
+  store double %7, ptr addrspace(1) %d, align 8
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
index 710e5c533f9cb..668e4b90555ba 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
@@ -38,11 +38,11 @@ define spir_func void @test() {
 entry:
   %arr = alloca [3 x i32], align 4
   %arr2 = alloca [3 x i32], align 4
-  %0 = bitcast [3 x i32]* %arr to i8*
-  call void @llvm.memcpy.p0i8.p2i8.i32(i8* align 4 %0, i8 addrspace(2)* align 4 bitcast ([3 x i32] addrspace(2)* @__const.test.arr to i8 addrspace(2)*), i32 12, i1 false)
-  %1 = bitcast [3 x i32]* %arr2 to i8*
-  call void @llvm.memcpy.p0i8.p2i8.i32(i8* align 4 %1, i8 addrspace(2)* align 4 bitcast ([3 x i32] addrspace(2)* @__const.test.arr2 to i8 addrspace(2)*), i32 12, i1 false)
+  %0 = bitcast ptr %arr to ptr
+  call void @llvm.memcpy.p0.p2.i32(ptr align 4 %0, ptr addrspace(2) align 4 @__const.test.arr, i32 12, i1 false)
+  %1 = bitcast ptr %arr2 to ptr
+  call void @llvm.memcpy.p0.p2.i32(ptr align 4 %1, ptr addrspace(2) align 4 @__const.test.arr2, i32 12, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p2i8.i32(i8* nocapture writeonly, i8 addrspace(2)* nocapture readonly, i32, i1)
+declare void @llvm.memcpy.p0.p2.i32(ptr nocapture writeonly, ptr addrspace(2) nocapture readonly, i32, i1)
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_ballot.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_ballot.ll
index 059169eafa97b..a5493e9b35cc9 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_ballot.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_ballot.ll
@@ -861,36 +861,36 @@ declare dso_local spir_func double @_Z25sub_group_broadcast_firstd(double) local
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBallotFindMSB %[[#int]] %[[#ScopeSubgroup]] %[[#ballot2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testBallotOperations(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testBallotOperations(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func <4 x i32> @_Z16sub_group_balloti(i32 0)
   %r2 = tail call spir_func <4 x i32> @__spirv_GroupNonUniformBallot(i32 3, i1 false)
   %3 = tail call spir_func i32 @_Z24sub_group_inverse_ballotDv4_j(<4 x i32> %2)
   %r3 = tail call spir_func i1 @__spirv_GroupNonUniformInverseBallot(i32 3, <4 x i32> %r2)
-  store i32 %3, i32 addrspace(1)* %0, align 4
+  store i32 %3, ptr addrspace(1) %0, align 4
   %4 = tail call spir_func i32 @_Z28sub_group_ballot_bit_extractDv4_jj(<4 x i32> %2, i32 0)
   %r4 = tail call spir_func i32 @__spirv_GroupNonUniformBallotBitExtract(i32 3, <4 x i32> %r2, i32 0)
-  %5 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %4, i32 addrspace(1)* %5, align 4
+  %5 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %4, ptr addrspace(1) %5, align 4
   %6 = tail call spir_func i32 @_Z26sub_group_ballot_bit_countDv4_j(<4 x i32> %2)
   %r6 = tail call spir_func i32 @__spirv_GroupNonUniformBallotBitCount(i32 3, i32 0, <4 x i32> %r2)
-  %7 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
-  store i32 %6, i32 addrspace(1)* %7, align 4
+  %7 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 2
+  store i32 %6, ptr addrspace(1) %7, align 4
   %8 = tail call spir_func i32 @_Z31sub_group_ballot_inclusive_scanDv4_j(<4 x i32> %2)
   %r8 = tail call spir_func i32 @__spirv_GroupNonUniformBallotBitCount(i32 3, i32 1, <4 x i32> %r2)
-  %9 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 3
-  store i32 %8, i32 addrspace(1)* %9, align 4
+  %9 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 3
+  store i32 %8, ptr addrspace(1) %9, align 4
   %10 = tail call spir_func i32 @_Z31sub_group_ballot_exclusive_scanDv4_j(<4 x i32> %2)
   %r10 = tail call spir_func i32 @__spirv_GroupNonUniformBallotBitCount(i32 3, i32 2, <4 x i32> %r2)
-  %11 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 4
-  store i32 %10, i32 addrspace(1)* %11, align 4
+  %11 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 4
+  store i32 %10, ptr addrspace(1) %11, align 4
   %12 = tail call spir_func i32 @_Z25sub_group_ballot_find_lsbDv4_j(<4 x i32> %2)
   %r12 = tail call spir_func i32 @__spirv_GroupNonUniformBallotFindLSB(i32 3, <4 x i32> %r2)
-  %13 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 5
-  store i32 %12, i32 addrspace(1)* %13, align 4
+  %13 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 5
+  store i32 %12, ptr addrspace(1) %13, align 4
   %14 = tail call spir_func i32 @_Z25sub_group_ballot_find_msbDv4_j(<4 x i32> %2)
   %r14 = tail call spir_func i32 @__spirv_GroupNonUniformBallotFindMSB(i32 3, <4 x i32> %r2)
-  %15 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 6
-  store i32 %14, i32 addrspace(1)* %15, align 4
+  %15 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 6
+  store i32 %14, ptr addrspace(1) %15, align 4
   ret void
 }
 
@@ -924,21 +924,21 @@ declare dso_local spir_func i32 @__spirv_GroupNonUniformBallotFindMSB(i32, <4 x
 ; CHECK-SPIRV: %[[#]] = OpLoad %[[#int4]] %[[#ltMask]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testSubgroupMasks(<4 x i32> addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testSubgroupMasks(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func <4 x i32> @_Z21get_sub_group_eq_maskv()
-  store <4 x i32> %2, <4 x i32> addrspace(1)* %0, align 16
+  store <4 x i32> %2, ptr addrspace(1) %0, align 16
   %3 = tail call spir_func <4 x i32> @_Z21get_sub_group_ge_maskv()
-  %4 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %0, i64 1
-  store <4 x i32> %3, <4 x i32> addrspace(1)* %4, align 16
+  %4 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %0, i64 1
+  store <4 x i32> %3, ptr addrspace(1) %4, align 16
   %5 = tail call spir_func <4 x i32> @_Z21get_sub_group_gt_maskv()
-  %6 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %0, i64 2
-  store <4 x i32> %5, <4 x i32> addrspace(1)* %6, align 16
+  %6 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %0, i64 2
+  store <4 x i32> %5, ptr addrspace(1) %6, align 16
   %7 = tail call spir_func <4 x i32> @_Z21get_sub_group_le_maskv()
-  %8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %0, i64 3
-  store <4 x i32> %7, <4 x i32> addrspace(1)* %8, align 16
+  %8 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %0, i64 3
+  store <4 x i32> %7, ptr addrspace(1) %8, align 16
   %9 = tail call spir_func <4 x i32> @_Z21get_sub_group_lt_maskv()
-  %10 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %0, i64 4
-  store <4 x i32> %9, <4 x i32> addrspace(1)* %10, align 16
+  %10 = getelementptr inbounds <4 x i32>, ptr addrspace(1) %0, i64 4
+  store <4 x i32> %9, ptr addrspace(1) %10, align 16
   ret void
 }
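(The rewrite repeated throughout these hunks is purely syntactic: the pointee type moves off the pointer type and survives only as the explicit element type on getelementptr and the value type on load/store, so no check lines need to change. A minimal standalone sketch, with a hypothetical kernel name that is not part of this patch:

  define spir_kernel void @example(ptr addrspace(1) %p) {
    ; the element type is now carried by the GEP, not by the pointer operand
    %q = getelementptr inbounds i32, ptr addrspace(1) %p, i64 1
    ; the stored value's type still fixes the access width
    store i32 0, ptr addrspace(1) %q, align 4
    ret void
  }
)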
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_clustered_reduce.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_clustered_reduce.ll
index ccfd810720a74..4b09c30c78d96 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_clustered_reduce.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_clustered_reduce.ll
@@ -204,18 +204,18 @@
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMax %[[#char]] %[[#ScopeSubgroup]] ClusteredReduce %[[#char_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredArithmeticChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredArithmeticChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i8 @_Z30sub_group_clustered_reduce_addcj(i8 signext 0, i32 2)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func signext i8 @_Z30sub_group_clustered_reduce_mulcj(i8 signext 0, i32 2)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   %5 = tail call spir_func signext i8 @_Z30sub_group_clustered_reduce_mincj(i8 signext 0, i32 2)
-  %6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
-  store i8 %5, i8 addrspace(1)* %6, align 1
+  %6 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 2
+  store i8 %5, ptr addrspace(1) %6, align 1
   %7 = tail call spir_func signext i8 @_Z30sub_group_clustered_reduce_maxcj(i8 signext 0, i32 2)
-  %8 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 3
-  store i8 %7, i8 addrspace(1)* %8, align 1
+  %8 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 3
+  store i8 %7, ptr addrspace(1) %8, align 1
   ret void
 }
 
@@ -234,18 +234,18 @@ declare dso_local spir_func signext i8 @_Z30sub_group_clustered_reduce_maxcj(i8
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMax %[[#char]] %[[#ScopeSubgroup]] ClusteredReduce %[[#char_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredArithmeticUChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredArithmeticUChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i8 @_Z30sub_group_clustered_reduce_addhj(i8 zeroext 0, i32 2)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func zeroext i8 @_Z30sub_group_clustered_reduce_mulhj(i8 zeroext 0, i32 2)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   %5 = tail call spir_func zeroext i8 @_Z30sub_group_clustered_reduce_minhj(i8 zeroext 0, i32 2)
-  %6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
-  store i8 %5, i8 addrspace(1)* %6, align 1
+  %6 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 2
+  store i8 %5, ptr addrspace(1) %6, align 1
   %7 = tail call spir_func zeroext i8 @_Z30sub_group_clustered_reduce_maxhj(i8 zeroext 0, i32 2)
-  %8 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 3
-  store i8 %7, i8 addrspace(1)* %8, align 1
+  %8 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 3
+  store i8 %7, ptr addrspace(1) %8, align 1
   ret void
 }
 
@@ -264,18 +264,18 @@ declare dso_local spir_func zeroext i8 @_Z30sub_group_clustered_reduce_maxhj(i8
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMax %[[#short]] %[[#ScopeSubgroup]] ClusteredReduce %[[#short_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredArithmeticShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredArithmeticShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i16 @_Z30sub_group_clustered_reduce_addsj(i16 signext 0, i32 2)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func signext i16 @_Z30sub_group_clustered_reduce_mulsj(i16 signext 0, i32 2)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func signext i16 @_Z30sub_group_clustered_reduce_minsj(i16 signext 0, i32 2)
-  %6 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 2
-  store i16 %5, i16 addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 2
+  store i16 %5, ptr addrspace(1) %6, align 2
   %7 = tail call spir_func signext i16 @_Z30sub_group_clustered_reduce_maxsj(i16 signext 0, i32 2)
-  %8 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 3
-  store i16 %7, i16 addrspace(1)* %8, align 2
+  %8 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 3
+  store i16 %7, ptr addrspace(1) %8, align 2
   ret void
 }
 
@@ -294,18 +294,18 @@ declare dso_local spir_func signext i16 @_Z30sub_group_clustered_reduce_maxsj(i1
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMax %[[#short]] %[[#ScopeSubgroup]] ClusteredReduce %[[#short_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredArithmeticUShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredArithmeticUShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i16 @_Z30sub_group_clustered_reduce_addtj(i16 zeroext 0, i32 2)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func zeroext i16 @_Z30sub_group_clustered_reduce_multj(i16 zeroext 0, i32 2)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func zeroext i16 @_Z30sub_group_clustered_reduce_mintj(i16 zeroext 0, i32 2)
-  %6 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 2
-  store i16 %5, i16 addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 2
+  store i16 %5, ptr addrspace(1) %6, align 2
   %7 = tail call spir_func zeroext i16 @_Z30sub_group_clustered_reduce_maxtj(i16 zeroext 0, i32 2)
-  %8 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 3
-  store i16 %7, i16 addrspace(1)* %8, align 2
+  %8 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 3
+  store i16 %7, ptr addrspace(1) %8, align 2
   ret void
 }
 
@@ -324,18 +324,18 @@ declare dso_local spir_func zeroext i16 @_Z30sub_group_clustered_reduce_maxtj(i1
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMax %[[#int]] %[[#ScopeSubgroup]] ClusteredReduce %[[#int_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredArithmeticInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredArithmeticInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_addij(i32 0, i32 2)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_mulij(i32 0, i32 2)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_minij(i32 0, i32 2)
-  %6 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
-  store i32 %5, i32 addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 2
+  store i32 %5, ptr addrspace(1) %6, align 4
   %7 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_maxij(i32 0, i32 2)
-  %8 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 3
-  store i32 %7, i32 addrspace(1)* %8, align 4
+  %8 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 3
+  store i32 %7, ptr addrspace(1) %8, align 4
   ret void
 }
 
@@ -354,18 +354,18 @@ declare dso_local spir_func i32 @_Z30sub_group_clustered_reduce_maxij(i32, i32)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMax %[[#int]] %[[#ScopeSubgroup]] ClusteredReduce %[[#int_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredArithmeticUInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredArithmeticUInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_addjj(i32 0, i32 2)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_muljj(i32 0, i32 2)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_minjj(i32 0, i32 2)
-  %6 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
-  store i32 %5, i32 addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 2
+  store i32 %5, ptr addrspace(1) %6, align 4
   %7 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_maxjj(i32 0, i32 2)
-  %8 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 3
-  store i32 %7, i32 addrspace(1)* %8, align 4
+  %8 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 3
+  store i32 %7, ptr addrspace(1) %8, align 4
   ret void
 }
 
@@ -384,18 +384,18 @@ declare dso_local spir_func i32 @_Z30sub_group_clustered_reduce_maxjj(i32, i32)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMax %[[#long]] %[[#ScopeSubgroup]] ClusteredReduce %[[#long_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredArithmeticLong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredArithmeticLong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_addlj(i64 0, i32 2)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_mullj(i64 0, i32 2)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   %5 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_minlj(i64 0, i32 2)
-  %6 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 2
-  store i64 %5, i64 addrspace(1)* %6, align 8
+  %6 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 2
+  store i64 %5, ptr addrspace(1) %6, align 8
   %7 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_maxlj(i64 0, i32 2)
-  %8 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 3
-  store i64 %7, i64 addrspace(1)* %8, align 8
+  %8 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 3
+  store i64 %7, ptr addrspace(1) %8, align 8
   ret void
 }
 
@@ -414,18 +414,18 @@ declare dso_local spir_func i64 @_Z30sub_group_clustered_reduce_maxlj(i64, i32)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMax %[[#long]] %[[#ScopeSubgroup]] ClusteredReduce %[[#long_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredArithmeticULong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredArithmeticULong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_addmj(i64 0, i32 2)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_mulmj(i64 0, i32 2)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   %5 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_minmj(i64 0, i32 2)
-  %6 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 2
-  store i64 %5, i64 addrspace(1)* %6, align 8
+  %6 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 2
+  store i64 %5, ptr addrspace(1) %6, align 8
   %7 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_maxmj(i64 0, i32 2)
-  %8 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 3
-  store i64 %7, i64 addrspace(1)* %8, align 8
+  %8 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 3
+  store i64 %7, ptr addrspace(1) %8, align 8
   ret void
 }
 
@@ -444,18 +444,18 @@ declare dso_local spir_func i64 @_Z30sub_group_clustered_reduce_maxmj(i64, i32)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFMax %[[#float]] %[[#ScopeSubgroup]] ClusteredReduce %[[#float_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredArithmeticFloat(float addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredArithmeticFloat(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func float @_Z30sub_group_clustered_reduce_addfj(float 0.000000e+00, i32 2)
-  store float %2, float addrspace(1)* %0, align 4
+  store float %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func float @_Z30sub_group_clustered_reduce_mulfj(float 0.000000e+00, i32 2)
-  %4 = getelementptr inbounds float, float addrspace(1)* %0, i64 1
-  store float %3, float addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds float, ptr addrspace(1) %0, i64 1
+  store float %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func float @_Z30sub_group_clustered_reduce_minfj(float 0.000000e+00, i32 2)
-  %6 = getelementptr inbounds float, float addrspace(1)* %0, i64 2
-  store float %5, float addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds float, ptr addrspace(1) %0, i64 2
+  store float %5, ptr addrspace(1) %6, align 4
   %7 = tail call spir_func float @_Z30sub_group_clustered_reduce_maxfj(float 0.000000e+00, i32 2)
-  %8 = getelementptr inbounds float, float addrspace(1)* %0, i64 3
-  store float %7, float addrspace(1)* %8, align 4
+  %8 = getelementptr inbounds float, ptr addrspace(1) %0, i64 3
+  store float %7, ptr addrspace(1) %8, align 4
   ret void
 }
 
@@ -474,18 +474,18 @@ declare dso_local spir_func float @_Z30sub_group_clustered_reduce_maxfj(float, i
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFMax %[[#half]] %[[#ScopeSubgroup]] ClusteredReduce %[[#half_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredArithmeticHalf(half addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredArithmeticHalf(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func half @_Z30sub_group_clustered_reduce_addDhj(half 0xH0000, i32 2)
-  store half %2, half addrspace(1)* %0, align 2
+  store half %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func half @_Z30sub_group_clustered_reduce_mulDhj(half 0xH0000, i32 2)
-  %4 = getelementptr inbounds half, half addrspace(1)* %0, i64 1
-  store half %3, half addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds half, ptr addrspace(1) %0, i64 1
+  store half %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func half @_Z30sub_group_clustered_reduce_minDhj(half 0xH0000, i32 2)
-  %6 = getelementptr inbounds half, half addrspace(1)* %0, i64 2
-  store half %5, half addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds half, ptr addrspace(1) %0, i64 2
+  store half %5, ptr addrspace(1) %6, align 2
   %7 = tail call spir_func half @_Z30sub_group_clustered_reduce_maxDhj(half 0xH0000, i32 2)
-  %8 = getelementptr inbounds half, half addrspace(1)* %0, i64 3
-  store half %7, half addrspace(1)* %8, align 2
+  %8 = getelementptr inbounds half, ptr addrspace(1) %0, i64 3
+  store half %7, ptr addrspace(1) %8, align 2
   ret void
 }
 
@@ -504,18 +504,18 @@ declare dso_local spir_func half @_Z30sub_group_clustered_reduce_maxDhj(half, i3
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFMax %[[#double]] %[[#ScopeSubgroup]] ClusteredReduce %[[#double_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredArithmeticDouble(double addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredArithmeticDouble(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func double @_Z30sub_group_clustered_reduce_adddj(double 0.000000e+00, i32 2)
-  store double %2, double addrspace(1)* %0, align 8
+  store double %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func double @_Z30sub_group_clustered_reduce_muldj(double 0.000000e+00, i32 2)
-  %4 = getelementptr inbounds double, double addrspace(1)* %0, i64 1
-  store double %3, double addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds double, ptr addrspace(1) %0, i64 1
+  store double %3, ptr addrspace(1) %4, align 8
   %5 = tail call spir_func double @_Z30sub_group_clustered_reduce_mindj(double 0.000000e+00, i32 2)
-  %6 = getelementptr inbounds double, double addrspace(1)* %0, i64 2
-  store double %5, double addrspace(1)* %6, align 8
+  %6 = getelementptr inbounds double, ptr addrspace(1) %0, i64 2
+  store double %5, ptr addrspace(1) %6, align 8
   %7 = tail call spir_func double @_Z30sub_group_clustered_reduce_maxdj(double 0.000000e+00, i32 2)
-  %8 = getelementptr inbounds double, double addrspace(1)* %0, i64 3
-  store double %7, double addrspace(1)* %8, align 8
+  %8 = getelementptr inbounds double, ptr addrspace(1) %0, i64 3
+  store double %7, ptr addrspace(1) %8, align 8
   ret void
 }
 
@@ -533,15 +533,15 @@ declare dso_local spir_func double @_Z30sub_group_clustered_reduce_maxdj(double,
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#char]] %[[#ScopeSubgroup]] ClusteredReduce %[[#char_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredBitwiseChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredBitwiseChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i8 @_Z30sub_group_clustered_reduce_andcj(i8 signext 0, i32 2)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func signext i8 @_Z29sub_group_clustered_reduce_orcj(i8 signext 0, i32 2)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   %5 = tail call spir_func signext i8 @_Z30sub_group_clustered_reduce_xorcj(i8 signext 0, i32 2)
-  %6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
-  store i8 %5, i8 addrspace(1)* %6, align 1
+  %6 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 2
+  store i8 %5, ptr addrspace(1) %6, align 1
   ret void
 }
 
@@ -557,15 +557,15 @@ declare dso_local spir_func signext i8 @_Z30sub_group_clustered_reduce_xorcj(i8
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#char]] %[[#ScopeSubgroup]] ClusteredReduce %[[#char_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredBitwiseUChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredBitwiseUChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i8 @_Z30sub_group_clustered_reduce_andhj(i8 zeroext 0, i32 2)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func zeroext i8 @_Z29sub_group_clustered_reduce_orhj(i8 zeroext 0, i32 2)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   %5 = tail call spir_func zeroext i8 @_Z30sub_group_clustered_reduce_xorhj(i8 zeroext 0, i32 2)
-  %6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
-  store i8 %5, i8 addrspace(1)* %6, align 1
+  %6 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 2
+  store i8 %5, ptr addrspace(1) %6, align 1
   ret void
 }
 
@@ -581,15 +581,15 @@ declare dso_local spir_func zeroext i8 @_Z30sub_group_clustered_reduce_xorhj(i8
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#short]] %[[#ScopeSubgroup]] ClusteredReduce %[[#short_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredBitwiseShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredBitwiseShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i16 @_Z30sub_group_clustered_reduce_andsj(i16 signext 0, i32 2)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func signext i16 @_Z29sub_group_clustered_reduce_orsj(i16 signext 0, i32 2)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func signext i16 @_Z30sub_group_clustered_reduce_xorsj(i16 signext 0, i32 2)
-  %6 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 2
-  store i16 %5, i16 addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 2
+  store i16 %5, ptr addrspace(1) %6, align 2
   ret void
 }
 
@@ -605,15 +605,15 @@ declare dso_local spir_func signext i16 @_Z30sub_group_clustered_reduce_xorsj(i1
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#short]] %[[#ScopeSubgroup]] ClusteredReduce %[[#short_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredBitwiseUShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredBitwiseUShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i16 @_Z30sub_group_clustered_reduce_andtj(i16 zeroext 0, i32 2)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func zeroext i16 @_Z29sub_group_clustered_reduce_ortj(i16 zeroext 0, i32 2)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func zeroext i16 @_Z30sub_group_clustered_reduce_xortj(i16 zeroext 0, i32 2)
-  %6 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 2
-  store i16 %5, i16 addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 2
+  store i16 %5, ptr addrspace(1) %6, align 2
   ret void
 }
 
@@ -629,15 +629,15 @@ declare dso_local spir_func zeroext i16 @_Z30sub_group_clustered_reduce_xortj(i1
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#int]] %[[#ScopeSubgroup]] ClusteredReduce %[[#int_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredBitwiseInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredBitwiseInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_andij(i32 0, i32 2)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z29sub_group_clustered_reduce_orij(i32 0, i32 2)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_xorij(i32 0, i32 2)
-  %6 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
-  store i32 %5, i32 addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 2
+  store i32 %5, ptr addrspace(1) %6, align 4
   ret void
 }
 
@@ -653,15 +653,15 @@ declare dso_local spir_func i32 @_Z30sub_group_clustered_reduce_xorij(i32, i32)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#int]] %[[#ScopeSubgroup]] ClusteredReduce %[[#int_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredBitwiseUInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredBitwiseUInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_andjj(i32 0, i32 2)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z29sub_group_clustered_reduce_orjj(i32 0, i32 2)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func i32 @_Z30sub_group_clustered_reduce_xorjj(i32 0, i32 2)
-  %6 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
-  store i32 %5, i32 addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 2
+  store i32 %5, ptr addrspace(1) %6, align 4
   ret void
 }
 
@@ -677,15 +677,15 @@ declare dso_local spir_func i32 @_Z30sub_group_clustered_reduce_xorjj(i32, i32)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#long]] %[[#ScopeSubgroup]] ClusteredReduce %[[#long_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredBitwiseLong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredBitwiseLong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_andlj(i64 0, i32 2)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z29sub_group_clustered_reduce_orlj(i64 0, i32 2)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   %5 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_xorlj(i64 0, i32 2)
-  %6 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 2
-  store i64 %5, i64 addrspace(1)* %6, align 8
+  %6 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 2
+  store i64 %5, ptr addrspace(1) %6, align 8
   ret void
 }
 
@@ -701,15 +701,15 @@ declare dso_local spir_func i64 @_Z30sub_group_clustered_reduce_xorlj(i64, i32)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#long]] %[[#ScopeSubgroup]] ClusteredReduce %[[#long_0]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredBitwiseULong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredBitwiseULong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_andmj(i64 0, i32 2)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z29sub_group_clustered_reduce_ormj(i64 0, i32 2)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   %5 = tail call spir_func i64 @_Z30sub_group_clustered_reduce_xormj(i64 0, i32 2)
-  %6 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 2
-  store i64 %5, i64 addrspace(1)* %6, align 8
+  %6 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 2
+  store i64 %5, ptr addrspace(1) %6, align 8
   ret void
 }
 
@@ -725,15 +725,15 @@ declare dso_local spir_func i64 @_Z30sub_group_clustered_reduce_xormj(i64, i32)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformLogicalXor %[[#bool]] %[[#ScopeSubgroup]] ClusteredReduce %[[#false]] %[[#int_2]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testClusteredLogical(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testClusteredLogical(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z38sub_group_clustered_reduce_logical_andij(i32 0, i32 2)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z37sub_group_clustered_reduce_logical_orij(i32 0, i32 2)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func i32 @_Z38sub_group_clustered_reduce_logical_xorij(i32 0, i32 2)
-  %6 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
-  store i32 %5, i32 addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 2
+  store i32 %5, ptr addrspace(1) %6, align 4
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_extended_types.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_extended_types.ll
index 98fac1d28df89..a23feab313436 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_extended_types.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_extended_types.ll
@@ -803,33 +803,33 @@ declare dso_local spir_func <16 x double> @_Z19sub_group_broadcastDv16_dj(<16 x
 ; CHECK-SPIRV: %[[#]] = OpGroupSMax %[[#char]] %[[#ScopeSubgroup]] ExclusiveScan %[[#char_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testReduceScanChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testReduceScanChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i8 @_Z20sub_group_reduce_addc(i8 signext 0)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func signext i8 @_Z20sub_group_reduce_minc(i8 signext 0)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   %5 = tail call spir_func signext i8 @_Z20sub_group_reduce_maxc(i8 signext 0)
-  %6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
-  store i8 %5, i8 addrspace(1)* %6, align 1
+  %6 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 2
+  store i8 %5, ptr addrspace(1) %6, align 1
   %7 = tail call spir_func signext i8 @_Z28sub_group_scan_inclusive_addc(i8 signext 0)
-  %8 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 3
-  store i8 %7, i8 addrspace(1)* %8, align 1
+  %8 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 3
+  store i8 %7, ptr addrspace(1) %8, align 1
   %9 = tail call spir_func signext i8 @_Z28sub_group_scan_inclusive_minc(i8 signext 0)
-  %10 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 4
-  store i8 %9, i8 addrspace(1)* %10, align 1
+  %10 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 4
+  store i8 %9, ptr addrspace(1) %10, align 1
   %11 = tail call spir_func signext i8 @_Z28sub_group_scan_inclusive_maxc(i8 signext 0)
-  %12 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 5
-  store i8 %11, i8 addrspace(1)* %12, align 1
+  %12 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 5
+  store i8 %11, ptr addrspace(1) %12, align 1
   %13 = tail call spir_func signext i8 @_Z28sub_group_scan_exclusive_addc(i8 signext 0)
-  %14 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 6
-  store i8 %13, i8 addrspace(1)* %14, align 1
+  %14 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 6
+  store i8 %13, ptr addrspace(1) %14, align 1
   %15 = tail call spir_func signext i8 @_Z28sub_group_scan_exclusive_minc(i8 signext 0)
-  %16 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 7
-  store i8 %15, i8 addrspace(1)* %16, align 1
+  %16 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 7
+  store i8 %15, ptr addrspace(1) %16, align 1
   %17 = tail call spir_func signext i8 @_Z28sub_group_scan_exclusive_maxc(i8 signext 0)
-  %18 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 8
-  store i8 %17, i8 addrspace(1)* %18, align 1
+  %18 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 8
+  store i8 %17, ptr addrspace(1) %18, align 1
   ret void
 }
 
@@ -863,33 +863,33 @@ declare dso_local spir_func signext i8 @_Z28sub_group_scan_exclusive_maxc(i8 sig
 ; CHECK-SPIRV: %[[#]] = OpGroupUMax %[[#char]] %[[#ScopeSubgroup]] ExclusiveScan %[[#char_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testReduceScanUChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testReduceScanUChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i8 @_Z20sub_group_reduce_addh(i8 zeroext 0)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func zeroext i8 @_Z20sub_group_reduce_minh(i8 zeroext 0)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   %5 = tail call spir_func zeroext i8 @_Z20sub_group_reduce_maxh(i8 zeroext 0)
-  %6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
-  store i8 %5, i8 addrspace(1)* %6, align 1
+  %6 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 2
+  store i8 %5, ptr addrspace(1) %6, align 1
   %7 = tail call spir_func zeroext i8 @_Z28sub_group_scan_inclusive_addh(i8 zeroext 0)
-  %8 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 3
-  store i8 %7, i8 addrspace(1)* %8, align 1
+  %8 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 3
+  store i8 %7, ptr addrspace(1) %8, align 1
   %9 = tail call spir_func zeroext i8 @_Z28sub_group_scan_inclusive_minh(i8 zeroext 0)
-  %10 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 4
-  store i8 %9, i8 addrspace(1)* %10, align 1
+  %10 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 4
+  store i8 %9, ptr addrspace(1) %10, align 1
   %11 = tail call spir_func zeroext i8 @_Z28sub_group_scan_inclusive_maxh(i8 zeroext 0)
-  %12 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 5
-  store i8 %11, i8 addrspace(1)* %12, align 1
+  %12 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 5
+  store i8 %11, ptr addrspace(1) %12, align 1
   %13 = tail call spir_func zeroext i8 @_Z28sub_group_scan_exclusive_addh(i8 zeroext 0)
-  %14 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 6
-  store i8 %13, i8 addrspace(1)* %14, align 1
+  %14 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 6
+  store i8 %13, ptr addrspace(1) %14, align 1
   %15 = tail call spir_func zeroext i8 @_Z28sub_group_scan_exclusive_minh(i8 zeroext 0)
-  %16 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 7
-  store i8 %15, i8 addrspace(1)* %16, align 1
+  %16 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 7
+  store i8 %15, ptr addrspace(1) %16, align 1
   %17 = tail call spir_func zeroext i8 @_Z28sub_group_scan_exclusive_maxh(i8 zeroext 0)
-  %18 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 8
-  store i8 %17, i8 addrspace(1)* %18, align 1
+  %18 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 8
+  store i8 %17, ptr addrspace(1) %18, align 1
   ret void
 }
 
@@ -923,33 +923,33 @@ declare dso_local spir_func zeroext i8 @_Z28sub_group_scan_exclusive_maxh(i8 zer
 ; CHECK-SPIRV: %[[#]] = OpGroupSMax %[[#short]] %[[#ScopeSubgroup]] ExclusiveScan %[[#short_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testReduceScanShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testReduceScanShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i16 @_Z20sub_group_reduce_adds(i16 signext 0)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func signext i16 @_Z20sub_group_reduce_mins(i16 signext 0)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func signext i16 @_Z20sub_group_reduce_maxs(i16 signext 0)
-  %6 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 2
-  store i16 %5, i16 addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 2
+  store i16 %5, ptr addrspace(1) %6, align 2
   %7 = tail call spir_func signext i16 @_Z28sub_group_scan_inclusive_adds(i16 signext 0)
-  %8 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 3
-  store i16 %7, i16 addrspace(1)* %8, align 2
+  %8 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 3
+  store i16 %7, ptr addrspace(1) %8, align 2
   %9 = tail call spir_func signext i16 @_Z28sub_group_scan_inclusive_mins(i16 signext 0)
-  %10 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 4
-  store i16 %9, i16 addrspace(1)* %10, align 2
+  %10 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 4
+  store i16 %9, ptr addrspace(1) %10, align 2
   %11 = tail call spir_func signext i16 @_Z28sub_group_scan_inclusive_maxs(i16 signext 0)
-  %12 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 5
-  store i16 %11, i16 addrspace(1)* %12, align 2
+  %12 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 5
+  store i16 %11, ptr addrspace(1) %12, align 2
   %13 = tail call spir_func signext i16 @_Z28sub_group_scan_exclusive_adds(i16 signext 0)
-  %14 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 6
-  store i16 %13, i16 addrspace(1)* %14, align 2
+  %14 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 6
+  store i16 %13, ptr addrspace(1) %14, align 2
   %15 = tail call spir_func signext i16 @_Z28sub_group_scan_exclusive_mins(i16 signext 0)
-  %16 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 7
-  store i16 %15, i16 addrspace(1)* %16, align 2
+  %16 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 7
+  store i16 %15, ptr addrspace(1) %16, align 2
   %17 = tail call spir_func signext i16 @_Z28sub_group_scan_exclusive_maxs(i16 signext 0)
-  %18 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 8
-  store i16 %17, i16 addrspace(1)* %18, align 2
+  %18 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 8
+  store i16 %17, ptr addrspace(1) %18, align 2
   ret void
 }
 
@@ -983,33 +983,33 @@ declare dso_local spir_func signext i16 @_Z28sub_group_scan_exclusive_maxs(i16 s
 ; CHECK-SPIRV: %[[#]] = OpGroupUMax %[[#short]] %[[#ScopeSubgroup]] ExclusiveScan %[[#short_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testReduceScanUShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testReduceScanUShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i16 @_Z20sub_group_reduce_addt(i16 zeroext 0)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func zeroext i16 @_Z20sub_group_reduce_mint(i16 zeroext 0)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func zeroext i16 @_Z20sub_group_reduce_maxt(i16 zeroext 0)
-  %6 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 2
-  store i16 %5, i16 addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 2
+  store i16 %5, ptr addrspace(1) %6, align 2
   %7 = tail call spir_func zeroext i16 @_Z28sub_group_scan_inclusive_addt(i16 zeroext 0)
-  %8 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 3
-  store i16 %7, i16 addrspace(1)* %8, align 2
+  %8 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 3
+  store i16 %7, ptr addrspace(1) %8, align 2
   %9 = tail call spir_func zeroext i16 @_Z28sub_group_scan_inclusive_mint(i16 zeroext 0)
-  %10 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 4
-  store i16 %9, i16 addrspace(1)* %10, align 2
+  %10 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 4
+  store i16 %9, ptr addrspace(1) %10, align 2
   %11 = tail call spir_func zeroext i16 @_Z28sub_group_scan_inclusive_maxt(i16 zeroext 0)
-  %12 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 5
-  store i16 %11, i16 addrspace(1)* %12, align 2
+  %12 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 5
+  store i16 %11, ptr addrspace(1) %12, align 2
   %13 = tail call spir_func zeroext i16 @_Z28sub_group_scan_exclusive_addt(i16 zeroext 0)
-  %14 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 6
-  store i16 %13, i16 addrspace(1)* %14, align 2
+  %14 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 6
+  store i16 %13, ptr addrspace(1) %14, align 2
   %15 = tail call spir_func zeroext i16 @_Z28sub_group_scan_exclusive_mint(i16 zeroext 0)
-  %16 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 7
-  store i16 %15, i16 addrspace(1)* %16, align 2
+  %16 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 7
+  store i16 %15, ptr addrspace(1) %16, align 2
   %17 = tail call spir_func zeroext i16 @_Z28sub_group_scan_exclusive_maxt(i16 zeroext 0)
-  %18 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 8
-  store i16 %17, i16 addrspace(1)* %18, align 2
+  %18 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 8
+  store i16 %17, ptr addrspace(1) %18, align 2
   ret void
 }
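(One consequence visible in this file: kernels that previously differed only in parameter pointee type, such as i8 addrspace(1)* versus i16 addrspace(1)*, now share the identical signature ptr addrspace(1), so the element width under test is established solely by the stores. A standalone sketch with hypothetical kernel names, not part of this patch:

  define spir_kernel void @byte_kernel(ptr addrspace(1) %out) {
    store i8 0, ptr addrspace(1) %out, align 1    ; 1-byte access
    ret void
  }

  define spir_kernel void @short_kernel(ptr addrspace(1) %out) {
    store i16 0, ptr addrspace(1) %out, align 2   ; 2-byte access
    ret void
  }
)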
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll
index 3b7e55cdd8263..33a3b68592c40 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll
@@ -359,46 +359,46 @@
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMax %[[#char]] %[[#ScopeSubgroup]] ExclusiveScan %[[#char_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformArithmeticChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformArithmeticChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_addc(i8 signext 0)
   %r2 = tail call spir_func signext i8 @__spirv_GroupNonUniformIAdd(i32 3, i32 0, i8 signext 10)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_mulc(i8 signext 0)
   %r3 = tail call spir_func signext i8 @__spirv_GroupNonUniformIMul(i32 3, i32 1, i8 signext 10)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   %5 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_minc(i8 signext 0)
   %r5 = tail call spir_func signext i8 @__spirv_GroupNonUniformSMin(i32 3, i32 0, i8 signext 10, i32 32)
-  %6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
-  store i8 %5, i8 addrspace(1)* %6, align 1
+  %6 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 2
+  store i8 %5, ptr addrspace(1) %6, align 1
   %7 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_maxc(i8 signext 0)
   %r7 = tail call spir_func signext i8 @__spirv_GroupNonUniformSMax(i32 3, i32 0, i8 signext 10, i32 32)
-  %8 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 3
-  store i8 %7, i8 addrspace(1)* %8, align 1
+  %8 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 3
+  store i8 %7, ptr addrspace(1) %8, align 1
   %9 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_inclusive_addc(i8 signext 0)
-  %10 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 4
-  store i8 %9, i8 addrspace(1)* %10, align 1
+  %10 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 4
+  store i8 %9, ptr addrspace(1) %10, align 1
   %11 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_inclusive_mulc(i8 signext 0)
-  %12 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 5
-  store i8 %11, i8 addrspace(1)* %12, align 1
+  %12 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 5
+  store i8 %11, ptr addrspace(1) %12, align 1
   %13 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_inclusive_minc(i8 signext 0)
-  %14 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 6
-  store i8 %13, i8 addrspace(1)* %14, align 1
+  %14 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 6
+  store i8 %13, ptr addrspace(1) %14, align 1
   %15 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_inclusive_maxc(i8 signext 0)
-  %16 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 7
-  store i8 %15, i8 addrspace(1)* %16, align 1
+  %16 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 7
+  store i8 %15, ptr addrspace(1) %16, align 1
   %17 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_exclusive_addc(i8 signext 0)
-  %18 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 8
-  store i8 %17, i8 addrspace(1)* %18, align 1
+  %18 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 8
+  store i8 %17, ptr addrspace(1) %18, align 1
   %19 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_exclusive_mulc(i8 signext 0)
-  %20 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 9
-  store i8 %19, i8 addrspace(1)* %20, align 1
+  %20 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 9
+  store i8 %19, ptr addrspace(1) %20, align 1
   %21 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_exclusive_minc(i8 signext 0)
-  %22 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 10
-  store i8 %21, i8 addrspace(1)* %22, align 1
+  %22 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 10
+  store i8 %21, ptr addrspace(1) %22, align 1
   %23 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_exclusive_maxc(i8 signext 0)
-  %24 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 11
-  store i8 %23, i8 addrspace(1)* %24, align 1
+  %24 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 11
+  store i8 %23, ptr addrspace(1) %24, align 1
   ret void
 }
 
@@ -445,42 +445,42 @@ declare dso_local spir_func signext i8 @_Z40sub_group_non_uniform_scan_exclusive
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMax %[[#char]] %[[#ScopeSubgroup]] ExclusiveScan %[[#char_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformArithmeticUChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformArithmeticUChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i8 @_Z32sub_group_non_uniform_reduce_addh(i8 zeroext 0)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func zeroext i8 @_Z32sub_group_non_uniform_reduce_mulh(i8 zeroext 0)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   %5 = tail call spir_func zeroext i8 @_Z32sub_group_non_uniform_reduce_minh(i8 zeroext 0)
-  %6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
-  store i8 %5, i8 addrspace(1)* %6, align 1
+  %6 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 2
+  store i8 %5, ptr addrspace(1) %6, align 1
   %7 = tail call spir_func zeroext i8 @_Z32sub_group_non_uniform_reduce_maxh(i8 zeroext 0)
-  %8 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 3
-  store i8 %7, i8 addrspace(1)* %8, align 1
+  %8 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 3
+  store i8 %7, ptr addrspace(1) %8, align 1
   %9 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_inclusive_addh(i8 zeroext 0)
-  %10 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 4
-  store i8 %9, i8 addrspace(1)* %10, align 1
+  %10 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 4
+  store i8 %9, ptr addrspace(1) %10, align 1
   %11 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_inclusive_mulh(i8 zeroext 0)
-  %12 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 5
-  store i8 %11, i8 addrspace(1)* %12, align 1
+  %12 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 5
+  store i8 %11, ptr addrspace(1) %12, align 1
   %13 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_inclusive_minh(i8 zeroext 0)
-  %14 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 6
-  store i8 %13, i8 addrspace(1)* %14, align 1
+  %14 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 6
+  store i8 %13, ptr addrspace(1) %14, align 1
   %15 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_inclusive_maxh(i8 zeroext 0)
-  %16 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 7
-  store i8 %15, i8 addrspace(1)* %16, align 1
+  %16 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 7
+  store i8 %15, ptr addrspace(1) %16, align 1
   %17 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_exclusive_addh(i8 zeroext 0)
-  %18 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 8
-  store i8 %17, i8 addrspace(1)* %18, align 1
+  %18 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 8
+  store i8 %17, ptr addrspace(1) %18, align 1
   %19 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_exclusive_mulh(i8 zeroext 0)
-  %20 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 9
-  store i8 %19, i8 addrspace(1)* %20, align 1
+  %20 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 9
+  store i8 %19, ptr addrspace(1) %20, align 1
   %21 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_exclusive_minh(i8 zeroext 0)
-  %22 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 10
-  store i8 %21, i8 addrspace(1)* %22, align 1
+  %22 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 10
+  store i8 %21, ptr addrspace(1) %22, align 1
   %23 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_exclusive_maxh(i8 zeroext 0)
-  %24 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 11
-  store i8 %23, i8 addrspace(1)* %24, align 1
+  %24 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 11
+  store i8 %23, ptr addrspace(1) %24, align 1
   ret void
 }
 
@@ -523,42 +523,42 @@ declare dso_local spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_exclusive
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMax %[[#short]] %[[#ScopeSubgroup]] ExclusiveScan %[[#short_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformArithmeticShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformArithmeticShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i16 @_Z32sub_group_non_uniform_reduce_adds(i16 signext 0)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func signext i16 @_Z32sub_group_non_uniform_reduce_muls(i16 signext 0)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func signext i16 @_Z32sub_group_non_uniform_reduce_mins(i16 signext 0)
-  %6 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 2
-  store i16 %5, i16 addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 2
+  store i16 %5, ptr addrspace(1) %6, align 2
   %7 = tail call spir_func signext i16 @_Z32sub_group_non_uniform_reduce_maxs(i16 signext 0)
-  %8 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 3
-  store i16 %7, i16 addrspace(1)* %8, align 2
+  %8 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 3
+  store i16 %7, ptr addrspace(1) %8, align 2
   %9 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_inclusive_adds(i16 signext 0)
-  %10 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 4
-  store i16 %9, i16 addrspace(1)* %10, align 2
+  %10 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 4
+  store i16 %9, ptr addrspace(1) %10, align 2
   %11 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_inclusive_muls(i16 signext 0)
-  %12 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 5
-  store i16 %11, i16 addrspace(1)* %12, align 2
+  %12 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 5
+  store i16 %11, ptr addrspace(1) %12, align 2
   %13 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_inclusive_mins(i16 signext 0)
-  %14 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 6
-  store i16 %13, i16 addrspace(1)* %14, align 2
+  %14 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 6
+  store i16 %13, ptr addrspace(1) %14, align 2
   %15 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_inclusive_maxs(i16 signext 0)
-  %16 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 7
-  store i16 %15, i16 addrspace(1)* %16, align 2
+  %16 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 7
+  store i16 %15, ptr addrspace(1) %16, align 2
   %17 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_exclusive_adds(i16 signext 0)
-  %18 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 8
-  store i16 %17, i16 addrspace(1)* %18, align 2
+  %18 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 8
+  store i16 %17, ptr addrspace(1) %18, align 2
   %19 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_exclusive_muls(i16 signext 0)
-  %20 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 9
-  store i16 %19, i16 addrspace(1)* %20, align 2
+  %20 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 9
+  store i16 %19, ptr addrspace(1) %20, align 2
   %21 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_exclusive_mins(i16 signext 0)
-  %22 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 10
-  store i16 %21, i16 addrspace(1)* %22, align 2
+  %22 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 10
+  store i16 %21, ptr addrspace(1) %22, align 2
   %23 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_exclusive_maxs(i16 signext 0)
-  %24 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 11
-  store i16 %23, i16 addrspace(1)* %24, align 2
+  %24 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 11
+  store i16 %23, ptr addrspace(1) %24, align 2
   ret void
 }
 
@@ -603,44 +603,44 @@ declare dso_local spir_func signext i16 @_Z40sub_group_non_uniform_scan_exclusiv
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMax %[[#short]] %[[#ScopeSubgroup]] ExclusiveScan %[[#short_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformArithmeticUShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformArithmeticUShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_addt(i16 zeroext 0)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_mult(i16 zeroext 0)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_mint(i16 zeroext 0)
   %r5 = tail call spir_func signext i16 @__spirv_GroupNonUniformUMin(i32 3, i32 0, i16 signext 0, i32 32)
-  %6 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 2
-  store i16 %5, i16 addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 2
+  store i16 %5, ptr addrspace(1) %6, align 2
   %7 = tail call spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_maxt(i16 zeroext 0)
   %r7 = tail call spir_func signext i16 @__spirv_GroupNonUniformUMax(i32 3, i32 0, i16 signext 0, i32 32)
-  %8 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 3
-  store i16 %7, i16 addrspace(1)* %8, align 2
+  %8 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 3
+  store i16 %7, ptr addrspace(1) %8, align 2
   %9 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_inclusive_addt(i16 zeroext 0)
-  %10 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 4
-  store i16 %9, i16 addrspace(1)* %10, align 2
+  %10 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 4
+  store i16 %9, ptr addrspace(1) %10, align 2
   %11 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_inclusive_mult(i16 zeroext 0)
-  %12 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 5
-  store i16 %11, i16 addrspace(1)* %12, align 2
+  %12 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 5
+  store i16 %11, ptr addrspace(1) %12, align 2
   %13 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_inclusive_mint(i16 zeroext 0)
-  %14 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 6
-  store i16 %13, i16 addrspace(1)* %14, align 2
+  %14 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 6
+  store i16 %13, ptr addrspace(1) %14, align 2
   %15 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_inclusive_maxt(i16 zeroext 0)
-  %16 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 7
-  store i16 %15, i16 addrspace(1)* %16, align 2
+  %16 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 7
+  store i16 %15, ptr addrspace(1) %16, align 2
   %17 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_exclusive_addt(i16 zeroext 0)
-  %18 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 8
-  store i16 %17, i16 addrspace(1)* %18, align 2
+  %18 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 8
+  store i16 %17, ptr addrspace(1) %18, align 2
   %19 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_exclusive_mult(i16 zeroext 0)
-  %20 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 9
-  store i16 %19, i16 addrspace(1)* %20, align 2
+  %20 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 9
+  store i16 %19, ptr addrspace(1) %20, align 2
   %21 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_exclusive_mint(i16 zeroext 0)
-  %22 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 10
-  store i16 %21, i16 addrspace(1)* %22, align 2
+  %22 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 10
+  store i16 %21, ptr addrspace(1) %22, align 2
   %23 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_exclusive_maxt(i16 zeroext 0)
-  %24 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 11
-  store i16 %23, i16 addrspace(1)* %24, align 2
+  %24 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 11
+  store i16 %23, ptr addrspace(1) %24, align 2
   ret void
 }
 
@@ -685,42 +685,42 @@ declare dso_local spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_exclusiv
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMax %[[#int]] %[[#ScopeSubgroup]] ExclusiveScan %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformArithmeticInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformArithmeticInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_addi(i32 0)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_muli(i32 0)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_mini(i32 0)
-  %6 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
-  store i32 %5, i32 addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 2
+  store i32 %5, ptr addrspace(1) %6, align 4
   %7 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_maxi(i32 0)
-  %8 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 3
-  store i32 %7, i32 addrspace(1)* %8, align 4
+  %8 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 3
+  store i32 %7, ptr addrspace(1) %8, align 4
   %9 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_addi(i32 0)
-  %10 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 4
-  store i32 %9, i32 addrspace(1)* %10, align 4
+  %10 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 4
+  store i32 %9, ptr addrspace(1) %10, align 4
   %11 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_muli(i32 0)
-  %12 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 5
-  store i32 %11, i32 addrspace(1)* %12, align 4
+  %12 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 5
+  store i32 %11, ptr addrspace(1) %12, align 4
   %13 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_mini(i32 0)
-  %14 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 6
-  store i32 %13, i32 addrspace(1)* %14, align 4
+  %14 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 6
+  store i32 %13, ptr addrspace(1) %14, align 4
   %15 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_maxi(i32 0)
-  %16 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 7
-  store i32 %15, i32 addrspace(1)* %16, align 4
+  %16 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 7
+  store i32 %15, ptr addrspace(1) %16, align 4
   %17 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_addi(i32 0)
-  %18 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 8
-  store i32 %17, i32 addrspace(1)* %18, align 4
+  %18 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 8
+  store i32 %17, ptr addrspace(1) %18, align 4
   %19 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_muli(i32 0)
-  %20 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 9
-  store i32 %19, i32 addrspace(1)* %20, align 4
+  %20 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 9
+  store i32 %19, ptr addrspace(1) %20, align 4
   %21 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_mini(i32 0)
-  %22 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 10
-  store i32 %21, i32 addrspace(1)* %22, align 4
+  %22 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 10
+  store i32 %21, ptr addrspace(1) %22, align 4
   %23 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_maxi(i32 0)
-  %24 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 11
-  store i32 %23, i32 addrspace(1)* %24, align 4
+  %24 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 11
+  store i32 %23, ptr addrspace(1) %24, align 4
   ret void
 }
 
@@ -763,42 +763,42 @@ declare dso_local spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_maxi(i
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMax %[[#int]] %[[#ScopeSubgroup]] ExclusiveScan %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformArithmeticUInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformArithmeticUInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_addj(i32 0)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_mulj(i32 0)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_minj(i32 0)
-  %6 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
-  store i32 %5, i32 addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 2
+  store i32 %5, ptr addrspace(1) %6, align 4
   %7 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_maxj(i32 0)
-  %8 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 3
-  store i32 %7, i32 addrspace(1)* %8, align 4
+  %8 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 3
+  store i32 %7, ptr addrspace(1) %8, align 4
   %9 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_addj(i32 0)
-  %10 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 4
-  store i32 %9, i32 addrspace(1)* %10, align 4
+  %10 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 4
+  store i32 %9, ptr addrspace(1) %10, align 4
   %11 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_mulj(i32 0)
-  %12 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 5
-  store i32 %11, i32 addrspace(1)* %12, align 4
+  %12 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 5
+  store i32 %11, ptr addrspace(1) %12, align 4
   %13 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_minj(i32 0)
-  %14 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 6
-  store i32 %13, i32 addrspace(1)* %14, align 4
+  %14 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 6
+  store i32 %13, ptr addrspace(1) %14, align 4
   %15 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_maxj(i32 0)
-  %16 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 7
-  store i32 %15, i32 addrspace(1)* %16, align 4
+  %16 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 7
+  store i32 %15, ptr addrspace(1) %16, align 4
   %17 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_addj(i32 0)
-  %18 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 8
-  store i32 %17, i32 addrspace(1)* %18, align 4
+  %18 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 8
+  store i32 %17, ptr addrspace(1) %18, align 4
   %19 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_mulj(i32 0)
-  %20 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 9
-  store i32 %19, i32 addrspace(1)* %20, align 4
+  %20 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 9
+  store i32 %19, ptr addrspace(1) %20, align 4
   %21 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_minj(i32 0)
-  %22 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 10
-  store i32 %21, i32 addrspace(1)* %22, align 4
+  %22 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 10
+  store i32 %21, ptr addrspace(1) %22, align 4
   %23 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_maxj(i32 0)
-  %24 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 11
-  store i32 %23, i32 addrspace(1)* %24, align 4
+  %24 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 11
+  store i32 %23, ptr addrspace(1) %24, align 4
   ret void
 }
 
@@ -841,42 +841,42 @@ declare dso_local spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_maxj(i
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformSMax %[[#long]] %[[#ScopeSubgroup]] ExclusiveScan %[[#long_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformArithmeticLong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformArithmeticLong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_addl(i64 0)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_mull(i64 0)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   %5 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_minl(i64 0)
-  %6 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 2
-  store i64 %5, i64 addrspace(1)* %6, align 8
+  %6 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 2
+  store i64 %5, ptr addrspace(1) %6, align 8
   %7 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_maxl(i64 0)
-  %8 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 3
-  store i64 %7, i64 addrspace(1)* %8, align 8
+  %8 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 3
+  store i64 %7, ptr addrspace(1) %8, align 8
   %9 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_addl(i64 0)
-  %10 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 4
-  store i64 %9, i64 addrspace(1)* %10, align 8
+  %10 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 4
+  store i64 %9, ptr addrspace(1) %10, align 8
   %11 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_mull(i64 0)
-  %12 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 5
-  store i64 %11, i64 addrspace(1)* %12, align 8
+  %12 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 5
+  store i64 %11, ptr addrspace(1) %12, align 8
   %13 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_minl(i64 0)
-  %14 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 6
-  store i64 %13, i64 addrspace(1)* %14, align 8
+  %14 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 6
+  store i64 %13, ptr addrspace(1) %14, align 8
   %15 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_maxl(i64 0)
-  %16 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 7
-  store i64 %15, i64 addrspace(1)* %16, align 8
+  %16 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 7
+  store i64 %15, ptr addrspace(1) %16, align 8
   %17 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_addl(i64 0)
-  %18 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 8
-  store i64 %17, i64 addrspace(1)* %18, align 8
+  %18 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 8
+  store i64 %17, ptr addrspace(1) %18, align 8
   %19 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_mull(i64 0)
-  %20 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 9
-  store i64 %19, i64 addrspace(1)* %20, align 8
+  %20 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 9
+  store i64 %19, ptr addrspace(1) %20, align 8
   %21 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_minl(i64 0)
-  %22 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 10
-  store i64 %21, i64 addrspace(1)* %22, align 8
+  %22 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 10
+  store i64 %21, ptr addrspace(1) %22, align 8
   %23 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_maxl(i64 0)
-  %24 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 11
-  store i64 %23, i64 addrspace(1)* %24, align 8
+  %24 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 11
+  store i64 %23, ptr addrspace(1) %24, align 8
   ret void
 }
 
@@ -919,42 +919,42 @@ declare dso_local spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_maxl(i
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformUMax %[[#long]] %[[#ScopeSubgroup]] ExclusiveScan %[[#long_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformArithmeticULong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformArithmeticULong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_addm(i64 0)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_mulm(i64 0)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   %5 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_minm(i64 0)
-  %6 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 2
-  store i64 %5, i64 addrspace(1)* %6, align 8
+  %6 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 2
+  store i64 %5, ptr addrspace(1) %6, align 8
   %7 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_maxm(i64 0)
-  %8 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 3
-  store i64 %7, i64 addrspace(1)* %8, align 8
+  %8 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 3
+  store i64 %7, ptr addrspace(1) %8, align 8
   %9 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_addm(i64 0)
-  %10 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 4
-  store i64 %9, i64 addrspace(1)* %10, align 8
+  %10 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 4
+  store i64 %9, ptr addrspace(1) %10, align 8
   %11 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_mulm(i64 0)
-  %12 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 5
-  store i64 %11, i64 addrspace(1)* %12, align 8
+  %12 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 5
+  store i64 %11, ptr addrspace(1) %12, align 8
   %13 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_minm(i64 0)
-  %14 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 6
-  store i64 %13, i64 addrspace(1)* %14, align 8
+  %14 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 6
+  store i64 %13, ptr addrspace(1) %14, align 8
   %15 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_maxm(i64 0)
-  %16 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 7
-  store i64 %15, i64 addrspace(1)* %16, align 8
+  %16 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 7
+  store i64 %15, ptr addrspace(1) %16, align 8
   %17 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_addm(i64 0)
-  %18 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 8
-  store i64 %17, i64 addrspace(1)* %18, align 8
+  %18 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 8
+  store i64 %17, ptr addrspace(1) %18, align 8
   %19 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_mulm(i64 0)
-  %20 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 9
-  store i64 %19, i64 addrspace(1)* %20, align 8
+  %20 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 9
+  store i64 %19, ptr addrspace(1) %20, align 8
   %21 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_minm(i64 0)
-  %22 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 10
-  store i64 %21, i64 addrspace(1)* %22, align 8
+  %22 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 10
+  store i64 %21, ptr addrspace(1) %22, align 8
   %23 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_maxm(i64 0)
-  %24 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 11
-  store i64 %23, i64 addrspace(1)* %24, align 8
+  %24 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 11
+  store i64 %23, ptr addrspace(1) %24, align 8
   ret void
 }
 
@@ -997,46 +997,46 @@ declare dso_local spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_maxm(i
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFMax %[[#float]] %[[#ScopeSubgroup]] ExclusiveScan %[[#float_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformArithmeticFloat(float addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformArithmeticFloat(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func float @_Z32sub_group_non_uniform_reduce_addf(float 0.000000e+00)
   %r2 = tail call spir_func float @__spirv_GroupNonUniformFAdd(i32 3, i32 0, float 0.000000e+00)
-  store float %2, float addrspace(1)* %0, align 4
+  store float %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func float @_Z32sub_group_non_uniform_reduce_mulf(float 0.000000e+00)
   %r3 = tail call spir_func float @__spirv_GroupNonUniformFMul(i32 3, i32 0, float 0.000000e+00)
-  %4 = getelementptr inbounds float, float addrspace(1)* %0, i64 1
-  store float %3, float addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds float, ptr addrspace(1) %0, i64 1
+  store float %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func float @_Z32sub_group_non_uniform_reduce_minf(float 0.000000e+00)
   %r5 = tail call spir_func float @__spirv_GroupNonUniformFMin(i32 3, i32 0, float 0.000000e+00)
-  %6 = getelementptr inbounds float, float addrspace(1)* %0, i64 2
-  store float %5, float addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds float, ptr addrspace(1) %0, i64 2
+  store float %5, ptr addrspace(1) %6, align 4
   %7 = tail call spir_func float @_Z32sub_group_non_uniform_reduce_maxf(float 0.000000e+00)
   %r7 = tail call spir_func float @__spirv_GroupNonUniformFMax(i32 3, i32 0, float 0.000000e+00)
-  %8 = getelementptr inbounds float, float addrspace(1)* %0, i64 3
-  store float %7, float addrspace(1)* %8, align 4
+  %8 = getelementptr inbounds float, ptr addrspace(1) %0, i64 3
+  store float %7, ptr addrspace(1) %8, align 4
   %9 = tail call spir_func float @_Z40sub_group_non_uniform_scan_inclusive_addf(float 0.000000e+00)
-  %10 = getelementptr inbounds float, float addrspace(1)* %0, i64 4
-  store float %9, float addrspace(1)* %10, align 4
+  %10 = getelementptr inbounds float, ptr addrspace(1) %0, i64 4
+  store float %9, ptr addrspace(1) %10, align 4
   %11 = tail call spir_func float @_Z40sub_group_non_uniform_scan_inclusive_mulf(float 0.000000e+00)
-  %12 = getelementptr inbounds float, float addrspace(1)* %0, i64 5
-  store float %11, float addrspace(1)* %12, align 4
+  %12 = getelementptr inbounds float, ptr addrspace(1) %0, i64 5
+  store float %11, ptr addrspace(1) %12, align 4
   %13 = tail call spir_func float @_Z40sub_group_non_uniform_scan_inclusive_minf(float 0.000000e+00)
-  %14 = getelementptr inbounds float, float addrspace(1)* %0, i64 6
-  store float %13, float addrspace(1)* %14, align 4
+  %14 = getelementptr inbounds float, ptr addrspace(1) %0, i64 6
+  store float %13, ptr addrspace(1) %14, align 4
   %15 = tail call spir_func float @_Z40sub_group_non_uniform_scan_inclusive_maxf(float 0.000000e+00)
-  %16 = getelementptr inbounds float, float addrspace(1)* %0, i64 7
-  store float %15, float addrspace(1)* %16, align 4
+  %16 = getelementptr inbounds float, ptr addrspace(1) %0, i64 7
+  store float %15, ptr addrspace(1) %16, align 4
   %17 = tail call spir_func float @_Z40sub_group_non_uniform_scan_exclusive_addf(float 0.000000e+00)
-  %18 = getelementptr inbounds float, float addrspace(1)* %0, i64 8
-  store float %17, float addrspace(1)* %18, align 4
+  %18 = getelementptr inbounds float, ptr addrspace(1) %0, i64 8
+  store float %17, ptr addrspace(1) %18, align 4
   %19 = tail call spir_func float @_Z40sub_group_non_uniform_scan_exclusive_mulf(float 0.000000e+00)
-  %20 = getelementptr inbounds float, float addrspace(1)* %0, i64 9
-  store float %19, float addrspace(1)* %20, align 4
+  %20 = getelementptr inbounds float, ptr addrspace(1) %0, i64 9
+  store float %19, ptr addrspace(1) %20, align 4
   %21 = tail call spir_func float @_Z40sub_group_non_uniform_scan_exclusive_minf(float 0.000000e+00)
-  %22 = getelementptr inbounds float, float addrspace(1)* %0, i64 10
-  store float %21, float addrspace(1)* %22, align 4
+  %22 = getelementptr inbounds float, ptr addrspace(1) %0, i64 10
+  store float %21, ptr addrspace(1) %22, align 4
   %23 = tail call spir_func float @_Z40sub_group_non_uniform_scan_exclusive_maxf(float 0.000000e+00)
-  %24 = getelementptr inbounds float, float addrspace(1)* %0, i64 11
-  store float %23, float addrspace(1)* %24, align 4
+  %24 = getelementptr inbounds float, ptr addrspace(1) %0, i64 11
+  store float %23, ptr addrspace(1) %24, align 4
   ret void
 }
 
@@ -1083,42 +1083,42 @@ declare dso_local spir_func float @_Z40sub_group_non_uniform_scan_exclusive_maxf
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFMax %[[#half]] %[[#ScopeSubgroup]] ExclusiveScan %[[#half_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformArithmeticHalf(half addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformArithmeticHalf(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func half @_Z32sub_group_non_uniform_reduce_addDh(half 0xH0000)
-  store half %2, half addrspace(1)* %0, align 2
+  store half %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func half @_Z32sub_group_non_uniform_reduce_mulDh(half 0xH0000)
-  %4 = getelementptr inbounds half, half addrspace(1)* %0, i64 1
-  store half %3, half addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds half, ptr addrspace(1) %0, i64 1
+  store half %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func half @_Z32sub_group_non_uniform_reduce_minDh(half 0xH0000)
-  %6 = getelementptr inbounds half, half addrspace(1)* %0, i64 2
-  store half %5, half addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds half, ptr addrspace(1) %0, i64 2
+  store half %5, ptr addrspace(1) %6, align 2
   %7 = tail call spir_func half @_Z32sub_group_non_uniform_reduce_maxDh(half 0xH0000)
-  %8 = getelementptr inbounds half, half addrspace(1)* %0, i64 3
-  store half %7, half addrspace(1)* %8, align 2
+  %8 = getelementptr inbounds half, ptr addrspace(1) %0, i64 3
+  store half %7, ptr addrspace(1) %8, align 2
   %9 = tail call spir_func half @_Z40sub_group_non_uniform_scan_inclusive_addDh(half 0xH0000)
-  %10 = getelementptr inbounds half, half addrspace(1)* %0, i64 4
-  store half %9, half addrspace(1)* %10, align 2
+  %10 = getelementptr inbounds half, ptr addrspace(1) %0, i64 4
+  store half %9, ptr addrspace(1) %10, align 2
   %11 = tail call spir_func half @_Z40sub_group_non_uniform_scan_inclusive_mulDh(half 0xH0000)
-  %12 = getelementptr inbounds half, half addrspace(1)* %0, i64 5
-  store half %11, half addrspace(1)* %12, align 2
+  %12 = getelementptr inbounds half, ptr addrspace(1) %0, i64 5
+  store half %11, ptr addrspace(1) %12, align 2
   %13 = tail call spir_func half @_Z40sub_group_non_uniform_scan_inclusive_minDh(half 0xH0000)
-  %14 = getelementptr inbounds half, half addrspace(1)* %0, i64 6
-  store half %13, half addrspace(1)* %14, align 2
+  %14 = getelementptr inbounds half, ptr addrspace(1) %0, i64 6
+  store half %13, ptr addrspace(1) %14, align 2
   %15 = tail call spir_func half @_Z40sub_group_non_uniform_scan_inclusive_maxDh(half 0xH0000)
-  %16 = getelementptr inbounds half, half addrspace(1)* %0, i64 7
-  store half %15, half addrspace(1)* %16, align 2
+  %16 = getelementptr inbounds half, ptr addrspace(1) %0, i64 7
+  store half %15, ptr addrspace(1) %16, align 2
   %17 = tail call spir_func half @_Z40sub_group_non_uniform_scan_exclusive_addDh(half 0xH0000)
-  %18 = getelementptr inbounds half, half addrspace(1)* %0, i64 8
-  store half %17, half addrspace(1)* %18, align 2
+  %18 = getelementptr inbounds half, ptr addrspace(1) %0, i64 8
+  store half %17, ptr addrspace(1) %18, align 2
   %19 = tail call spir_func half @_Z40sub_group_non_uniform_scan_exclusive_mulDh(half 0xH0000)
-  %20 = getelementptr inbounds half, half addrspace(1)* %0, i64 9
-  store half %19, half addrspace(1)* %20, align 2
+  %20 = getelementptr inbounds half, ptr addrspace(1) %0, i64 9
+  store half %19, ptr addrspace(1) %20, align 2
   %21 = tail call spir_func half @_Z40sub_group_non_uniform_scan_exclusive_minDh(half 0xH0000)
-  %22 = getelementptr inbounds half, half addrspace(1)* %0, i64 10
-  store half %21, half addrspace(1)* %22, align 2
+  %22 = getelementptr inbounds half, ptr addrspace(1) %0, i64 10
+  store half %21, ptr addrspace(1) %22, align 2
   %23 = tail call spir_func half @_Z40sub_group_non_uniform_scan_exclusive_maxDh(half 0xH0000)
-  %24 = getelementptr inbounds half, half addrspace(1)* %0, i64 11
-  store half %23, half addrspace(1)* %24, align 2
+  %24 = getelementptr inbounds half, ptr addrspace(1) %0, i64 11
+  store half %23, ptr addrspace(1) %24, align 2
   ret void
 }
 
@@ -1161,42 +1161,42 @@ declare dso_local spir_func half @_Z40sub_group_non_uniform_scan_exclusive_maxDh
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformFMax %[[#double]] %[[#ScopeSubgroup]] ExclusiveScan %[[#double_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformArithmeticDouble(double addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformArithmeticDouble(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func double @_Z32sub_group_non_uniform_reduce_addd(double 0.000000e+00)
-  store double %2, double addrspace(1)* %0, align 8
+  store double %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func double @_Z32sub_group_non_uniform_reduce_muld(double 0.000000e+00)
-  %4 = getelementptr inbounds double, double addrspace(1)* %0, i64 1
-  store double %3, double addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds double, ptr addrspace(1) %0, i64 1
+  store double %3, ptr addrspace(1) %4, align 8
   %5 = tail call spir_func double @_Z32sub_group_non_uniform_reduce_mind(double 0.000000e+00)
-  %6 = getelementptr inbounds double, double addrspace(1)* %0, i64 2
-  store double %5, double addrspace(1)* %6, align 8
+  %6 = getelementptr inbounds double, ptr addrspace(1) %0, i64 2
+  store double %5, ptr addrspace(1) %6, align 8
   %7 = tail call spir_func double @_Z32sub_group_non_uniform_reduce_maxd(double 0.000000e+00)
-  %8 = getelementptr inbounds double, double addrspace(1)* %0, i64 3
-  store double %7, double addrspace(1)* %8, align 8
+  %8 = getelementptr inbounds double, ptr addrspace(1) %0, i64 3
+  store double %7, ptr addrspace(1) %8, align 8
   %9 = tail call spir_func double @_Z40sub_group_non_uniform_scan_inclusive_addd(double 0.000000e+00)
-  %10 = getelementptr inbounds double, double addrspace(1)* %0, i64 4
-  store double %9, double addrspace(1)* %10, align 8
+  %10 = getelementptr inbounds double, ptr addrspace(1) %0, i64 4
+  store double %9, ptr addrspace(1) %10, align 8
   %11 = tail call spir_func double @_Z40sub_group_non_uniform_scan_inclusive_muld(double 0.000000e+00)
-  %12 = getelementptr inbounds double, double addrspace(1)* %0, i64 5
-  store double %11, double addrspace(1)* %12, align 8
+  %12 = getelementptr inbounds double, ptr addrspace(1) %0, i64 5
+  store double %11, ptr addrspace(1) %12, align 8
   %13 = tail call spir_func double @_Z40sub_group_non_uniform_scan_inclusive_mind(double 0.000000e+00)
-  %14 = getelementptr inbounds double, double addrspace(1)* %0, i64 6
-  store double %13, double addrspace(1)* %14, align 8
+  %14 = getelementptr inbounds double, ptr addrspace(1) %0, i64 6
+  store double %13, ptr addrspace(1) %14, align 8
   %15 = tail call spir_func double @_Z40sub_group_non_uniform_scan_inclusive_maxd(double 0.000000e+00)
-  %16 = getelementptr inbounds double, double addrspace(1)* %0, i64 7
-  store double %15, double addrspace(1)* %16, align 8
+  %16 = getelementptr inbounds double, ptr addrspace(1) %0, i64 7
+  store double %15, ptr addrspace(1) %16, align 8
   %17 = tail call spir_func double @_Z40sub_group_non_uniform_scan_exclusive_addd(double 0.000000e+00)
-  %18 = getelementptr inbounds double, double addrspace(1)* %0, i64 8
-  store double %17, double addrspace(1)* %18, align 8
+  %18 = getelementptr inbounds double, ptr addrspace(1) %0, i64 8
+  store double %17, ptr addrspace(1) %18, align 8
   %19 = tail call spir_func double @_Z40sub_group_non_uniform_scan_exclusive_muld(double 0.000000e+00)
-  %20 = getelementptr inbounds double, double addrspace(1)* %0, i64 9
-  store double %19, double addrspace(1)* %20, align 8
+  %20 = getelementptr inbounds double, ptr addrspace(1) %0, i64 9
+  store double %19, ptr addrspace(1) %20, align 8
   %21 = tail call spir_func double @_Z40sub_group_non_uniform_scan_exclusive_mind(double 0.000000e+00)
-  %22 = getelementptr inbounds double, double addrspace(1)* %0, i64 10
-  store double %21, double addrspace(1)* %22, align 8
+  %22 = getelementptr inbounds double, ptr addrspace(1) %0, i64 10
+  store double %21, ptr addrspace(1) %22, align 8
   %23 = tail call spir_func double @_Z40sub_group_non_uniform_scan_exclusive_maxd(double 0.000000e+00)
-  %24 = getelementptr inbounds double, double addrspace(1)* %0, i64 11
-  store double %23, double addrspace(1)* %24, align 8
+  %24 = getelementptr inbounds double, ptr addrspace(1) %0, i64 11
+  store double %23, ptr addrspace(1) %24, align 8
   ret void
 }
 
@@ -1236,39 +1236,39 @@ declare dso_local spir_func double @_Z40sub_group_non_uniform_scan_exclusive_max
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#char]] %[[#ScopeSubgroup]] ExclusiveScan %[[#char_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformBitwiseChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformBitwiseChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_andc(i8 signext 0)
   %r2 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseAnd(i32 3, i32 0, i8 signext 0)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func signext i8 @_Z31sub_group_non_uniform_reduce_orc(i8 signext 0)
   %r3 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseOr(i32 3, i32 0, i8 signext 0)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   %5 = tail call spir_func signext i8 @_Z32sub_group_non_uniform_reduce_xorc(i8 signext 0)
   %r5 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseXor(i32 3, i32 0, i8 signext 0)
-  %6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
-  store i8 %5, i8 addrspace(1)* %6, align 1
+  %6 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 2
+  store i8 %5, ptr addrspace(1) %6, align 1
   %7 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_inclusive_andc(i8 signext 0)
   %r7 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseAnd(i32 3, i32 1, i8 signext 0)
-  %8 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 3
-  store i8 %7, i8 addrspace(1)* %8, align 1
+  %8 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 3
+  store i8 %7, ptr addrspace(1) %8, align 1
   %9 = tail call spir_func signext i8 @_Z39sub_group_non_uniform_scan_inclusive_orc(i8 signext 0)
   %r9 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseOr(i32 3, i32 1, i8 signext 0)
-  %10 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 4
-  store i8 %9, i8 addrspace(1)* %10, align 1
+  %10 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 4
+  store i8 %9, ptr addrspace(1) %10, align 1
   %11 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_inclusive_xorc(i8 signext 0)
   %r11 = tail call spir_func signext i8 @__spirv_GroupNonUniformBitwiseXor(i32 3, i32 1, i8 signext 0)
-  %12 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 5
-  store i8 %11, i8 addrspace(1)* %12, align 1
+  %12 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 5
+  store i8 %11, ptr addrspace(1) %12, align 1
   %13 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_exclusive_andc(i8 signext 0)
-  %14 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 6
-  store i8 %13, i8 addrspace(1)* %14, align 1
+  %14 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 6
+  store i8 %13, ptr addrspace(1) %14, align 1
   %15 = tail call spir_func signext i8 @_Z39sub_group_non_uniform_scan_exclusive_orc(i8 signext 0)
-  %16 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 7
-  store i8 %15, i8 addrspace(1)* %16, align 1
+  %16 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 7
+  store i8 %15, ptr addrspace(1) %16, align 1
   %17 = tail call spir_func signext i8 @_Z40sub_group_non_uniform_scan_exclusive_xorc(i8 signext 0)
-  %18 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 8
-  store i8 %17, i8 addrspace(1)* %18, align 1
+  %18 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 8
+  store i8 %17, ptr addrspace(1) %18, align 1
   ret void
 }
 
@@ -1305,33 +1305,33 @@ declare dso_local spir_func signext i8 @_Z40sub_group_non_uniform_scan_exclusive
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#char]] %[[#ScopeSubgroup]] ExclusiveScan %[[#char_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformBitwiseUChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformBitwiseUChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i8 @_Z32sub_group_non_uniform_reduce_andh(i8 zeroext 0)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func zeroext i8 @_Z31sub_group_non_uniform_reduce_orh(i8 zeroext 0)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   %5 = tail call spir_func zeroext i8 @_Z32sub_group_non_uniform_reduce_xorh(i8 zeroext 0)
-  %6 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 2
-  store i8 %5, i8 addrspace(1)* %6, align 1
+  %6 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 2
+  store i8 %5, ptr addrspace(1) %6, align 1
   %7 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_inclusive_andh(i8 zeroext 0)
-  %8 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 3
-  store i8 %7, i8 addrspace(1)* %8, align 1
+  %8 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 3
+  store i8 %7, ptr addrspace(1) %8, align 1
   %9 = tail call spir_func zeroext i8 @_Z39sub_group_non_uniform_scan_inclusive_orh(i8 zeroext 0)
-  %10 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 4
-  store i8 %9, i8 addrspace(1)* %10, align 1
+  %10 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 4
+  store i8 %9, ptr addrspace(1) %10, align 1
   %11 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_inclusive_xorh(i8 zeroext 0)
-  %12 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 5
-  store i8 %11, i8 addrspace(1)* %12, align 1
+  %12 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 5
+  store i8 %11, ptr addrspace(1) %12, align 1
   %13 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_exclusive_andh(i8 zeroext 0)
-  %14 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 6
-  store i8 %13, i8 addrspace(1)* %14, align 1
+  %14 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 6
+  store i8 %13, ptr addrspace(1) %14, align 1
   %15 = tail call spir_func zeroext i8 @_Z39sub_group_non_uniform_scan_exclusive_orh(i8 zeroext 0)
-  %16 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 7
-  store i8 %15, i8 addrspace(1)* %16, align 1
+  %16 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 7
+  store i8 %15, ptr addrspace(1) %16, align 1
   %17 = tail call spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_exclusive_xorh(i8 zeroext 0)
-  %18 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 8
-  store i8 %17, i8 addrspace(1)* %18, align 1
+  %18 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 8
+  store i8 %17, ptr addrspace(1) %18, align 1
   ret void
 }
 
@@ -1365,33 +1365,33 @@ declare dso_local spir_func zeroext i8 @_Z40sub_group_non_uniform_scan_exclusive
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#short]] %[[#ScopeSubgroup]] ExclusiveScan %[[#short_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformBitwiseShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformBitwiseShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i16 @_Z32sub_group_non_uniform_reduce_ands(i16 signext 0)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func signext i16 @_Z31sub_group_non_uniform_reduce_ors(i16 signext 0)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func signext i16 @_Z32sub_group_non_uniform_reduce_xors(i16 signext 0)
-  %6 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 2
-  store i16 %5, i16 addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 2
+  store i16 %5, ptr addrspace(1) %6, align 2
   %7 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_inclusive_ands(i16 signext 0)
-  %8 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 3
-  store i16 %7, i16 addrspace(1)* %8, align 2
+  %8 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 3
+  store i16 %7, ptr addrspace(1) %8, align 2
   %9 = tail call spir_func signext i16 @_Z39sub_group_non_uniform_scan_inclusive_ors(i16 signext 0)
-  %10 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 4
-  store i16 %9, i16 addrspace(1)* %10, align 2
+  %10 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 4
+  store i16 %9, ptr addrspace(1) %10, align 2
   %11 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_inclusive_xors(i16 signext 0)
-  %12 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 5
-  store i16 %11, i16 addrspace(1)* %12, align 2
+  %12 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 5
+  store i16 %11, ptr addrspace(1) %12, align 2
   %13 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_exclusive_ands(i16 signext 0)
-  %14 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 6
-  store i16 %13, i16 addrspace(1)* %14, align 2
+  %14 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 6
+  store i16 %13, ptr addrspace(1) %14, align 2
   %15 = tail call spir_func signext i16 @_Z39sub_group_non_uniform_scan_exclusive_ors(i16 signext 0)
-  %16 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 7
-  store i16 %15, i16 addrspace(1)* %16, align 2
+  %16 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 7
+  store i16 %15, ptr addrspace(1) %16, align 2
   %17 = tail call spir_func signext i16 @_Z40sub_group_non_uniform_scan_exclusive_xors(i16 signext 0)
-  %18 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 8
-  store i16 %17, i16 addrspace(1)* %18, align 2
+  %18 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 8
+  store i16 %17, ptr addrspace(1) %18, align 2
   ret void
 }
 
@@ -1425,33 +1425,33 @@ declare dso_local spir_func signext i16 @_Z40sub_group_non_uniform_scan_exclusiv
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#short]] %[[#ScopeSubgroup]] ExclusiveScan %[[#short_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformBitwiseUShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformBitwiseUShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_andt(i16 zeroext 0)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func zeroext i16 @_Z31sub_group_non_uniform_reduce_ort(i16 zeroext 0)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   %5 = tail call spir_func zeroext i16 @_Z32sub_group_non_uniform_reduce_xort(i16 zeroext 0)
-  %6 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 2
-  store i16 %5, i16 addrspace(1)* %6, align 2
+  %6 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 2
+  store i16 %5, ptr addrspace(1) %6, align 2
   %7 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_inclusive_andt(i16 zeroext 0)
-  %8 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 3
-  store i16 %7, i16 addrspace(1)* %8, align 2
+  %8 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 3
+  store i16 %7, ptr addrspace(1) %8, align 2
   %9 = tail call spir_func zeroext i16 @_Z39sub_group_non_uniform_scan_inclusive_ort(i16 zeroext 0)
-  %10 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 4
-  store i16 %9, i16 addrspace(1)* %10, align 2
+  %10 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 4
+  store i16 %9, ptr addrspace(1) %10, align 2
   %11 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_inclusive_xort(i16 zeroext 0)
-  %12 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 5
-  store i16 %11, i16 addrspace(1)* %12, align 2
+  %12 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 5
+  store i16 %11, ptr addrspace(1) %12, align 2
   %13 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_exclusive_andt(i16 zeroext 0)
-  %14 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 6
-  store i16 %13, i16 addrspace(1)* %14, align 2
+  %14 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 6
+  store i16 %13, ptr addrspace(1) %14, align 2
   %15 = tail call spir_func zeroext i16 @_Z39sub_group_non_uniform_scan_exclusive_ort(i16 zeroext 0)
-  %16 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 7
-  store i16 %15, i16 addrspace(1)* %16, align 2
+  %16 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 7
+  store i16 %15, ptr addrspace(1) %16, align 2
   %17 = tail call spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_exclusive_xort(i16 zeroext 0)
-  %18 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 8
-  store i16 %17, i16 addrspace(1)* %18, align 2
+  %18 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 8
+  store i16 %17, ptr addrspace(1) %18, align 2
   ret void
 }
 
@@ -1485,33 +1485,33 @@ declare dso_local spir_func zeroext i16 @_Z40sub_group_non_uniform_scan_exclusiv
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#int]] %[[#ScopeSubgroup]] ExclusiveScan %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformBitwiseInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformBitwiseInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_andi(i32 0)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z31sub_group_non_uniform_reduce_ori(i32 0)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_xori(i32 0)
-  %6 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
-  store i32 %5, i32 addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 2
+  store i32 %5, ptr addrspace(1) %6, align 4
   %7 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_andi(i32 0)
-  %8 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 3
-  store i32 %7, i32 addrspace(1)* %8, align 4
+  %8 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 3
+  store i32 %7, ptr addrspace(1) %8, align 4
   %9 = tail call spir_func i32 @_Z39sub_group_non_uniform_scan_inclusive_ori(i32 0)
-  %10 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 4
-  store i32 %9, i32 addrspace(1)* %10, align 4
+  %10 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 4
+  store i32 %9, ptr addrspace(1) %10, align 4
   %11 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_xori(i32 0)
-  %12 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 5
-  store i32 %11, i32 addrspace(1)* %12, align 4
+  %12 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 5
+  store i32 %11, ptr addrspace(1) %12, align 4
   %13 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_andi(i32 0)
-  %14 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 6
-  store i32 %13, i32 addrspace(1)* %14, align 4
+  %14 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 6
+  store i32 %13, ptr addrspace(1) %14, align 4
   %15 = tail call spir_func i32 @_Z39sub_group_non_uniform_scan_exclusive_ori(i32 0)
-  %16 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 7
-  store i32 %15, i32 addrspace(1)* %16, align 4
+  %16 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 7
+  store i32 %15, ptr addrspace(1) %16, align 4
   %17 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_xori(i32 0)
-  %18 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 8
-  store i32 %17, i32 addrspace(1)* %18, align 4
+  %18 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 8
+  store i32 %17, ptr addrspace(1) %18, align 4
   ret void
 }
 
@@ -1545,33 +1545,33 @@ declare dso_local spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_xori(i
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#int]] %[[#ScopeSubgroup]] ExclusiveScan %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformBitwiseUInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformBitwiseUInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_andj(i32 0)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z31sub_group_non_uniform_reduce_orj(i32 0)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func i32 @_Z32sub_group_non_uniform_reduce_xorj(i32 0)
-  %6 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
-  store i32 %5, i32 addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 2
+  store i32 %5, ptr addrspace(1) %6, align 4
   %7 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_andj(i32 0)
-  %8 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 3
-  store i32 %7, i32 addrspace(1)* %8, align 4
+  %8 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 3
+  store i32 %7, ptr addrspace(1) %8, align 4
   %9 = tail call spir_func i32 @_Z39sub_group_non_uniform_scan_inclusive_orj(i32 0)
-  %10 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 4
-  store i32 %9, i32 addrspace(1)* %10, align 4
+  %10 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 4
+  store i32 %9, ptr addrspace(1) %10, align 4
   %11 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_inclusive_xorj(i32 0)
-  %12 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 5
-  store i32 %11, i32 addrspace(1)* %12, align 4
+  %12 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 5
+  store i32 %11, ptr addrspace(1) %12, align 4
   %13 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_andj(i32 0)
-  %14 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 6
-  store i32 %13, i32 addrspace(1)* %14, align 4
+  %14 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 6
+  store i32 %13, ptr addrspace(1) %14, align 4
   %15 = tail call spir_func i32 @_Z39sub_group_non_uniform_scan_exclusive_orj(i32 0)
-  %16 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 7
-  store i32 %15, i32 addrspace(1)* %16, align 4
+  %16 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 7
+  store i32 %15, ptr addrspace(1) %16, align 4
   %17 = tail call spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_xorj(i32 0)
-  %18 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 8
-  store i32 %17, i32 addrspace(1)* %18, align 4
+  %18 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 8
+  store i32 %17, ptr addrspace(1) %18, align 4
   ret void
 }
 
@@ -1605,33 +1605,33 @@ declare dso_local spir_func i32 @_Z40sub_group_non_uniform_scan_exclusive_xorj(i
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#long]] %[[#ScopeSubgroup]] ExclusiveScan %[[#long_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformBitwiseLong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformBitwiseLong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_andl(i64 0)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z31sub_group_non_uniform_reduce_orl(i64 0)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   %5 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_xorl(i64 0)
-  %6 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 2
-  store i64 %5, i64 addrspace(1)* %6, align 8
+  %6 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 2
+  store i64 %5, ptr addrspace(1) %6, align 8
   %7 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_andl(i64 0)
-  %8 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 3
-  store i64 %7, i64 addrspace(1)* %8, align 8
+  %8 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 3
+  store i64 %7, ptr addrspace(1) %8, align 8
   %9 = tail call spir_func i64 @_Z39sub_group_non_uniform_scan_inclusive_orl(i64 0)
-  %10 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 4
-  store i64 %9, i64 addrspace(1)* %10, align 8
+  %10 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 4
+  store i64 %9, ptr addrspace(1) %10, align 8
   %11 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_xorl(i64 0)
-  %12 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 5
-  store i64 %11, i64 addrspace(1)* %12, align 8
+  %12 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 5
+  store i64 %11, ptr addrspace(1) %12, align 8
   %13 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_andl(i64 0)
-  %14 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 6
-  store i64 %13, i64 addrspace(1)* %14, align 8
+  %14 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 6
+  store i64 %13, ptr addrspace(1) %14, align 8
   %15 = tail call spir_func i64 @_Z39sub_group_non_uniform_scan_exclusive_orl(i64 0)
-  %16 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 7
-  store i64 %15, i64 addrspace(1)* %16, align 8
+  %16 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 7
+  store i64 %15, ptr addrspace(1) %16, align 8
   %17 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_xorl(i64 0)
-  %18 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 8
-  store i64 %17, i64 addrspace(1)* %18, align 8
+  %18 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 8
+  store i64 %17, ptr addrspace(1) %18, align 8
   ret void
 }
 
@@ -1665,33 +1665,33 @@ declare dso_local spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_xorl(i
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformBitwiseXor %[[#long]] %[[#ScopeSubgroup]] ExclusiveScan %[[#long_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformBitwiseULong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformBitwiseULong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_andm(i64 0)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z31sub_group_non_uniform_reduce_orm(i64 0)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   %5 = tail call spir_func i64 @_Z32sub_group_non_uniform_reduce_xorm(i64 0)
-  %6 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 2
-  store i64 %5, i64 addrspace(1)* %6, align 8
+  %6 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 2
+  store i64 %5, ptr addrspace(1) %6, align 8
   %7 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_andm(i64 0)
-  %8 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 3
-  store i64 %7, i64 addrspace(1)* %8, align 8
+  %8 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 3
+  store i64 %7, ptr addrspace(1) %8, align 8
   %9 = tail call spir_func i64 @_Z39sub_group_non_uniform_scan_inclusive_orm(i64 0)
-  %10 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 4
-  store i64 %9, i64 addrspace(1)* %10, align 8
+  %10 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 4
+  store i64 %9, ptr addrspace(1) %10, align 8
   %11 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_inclusive_xorm(i64 0)
-  %12 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 5
-  store i64 %11, i64 addrspace(1)* %12, align 8
+  %12 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 5
+  store i64 %11, ptr addrspace(1) %12, align 8
   %13 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_andm(i64 0)
-  %14 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 6
-  store i64 %13, i64 addrspace(1)* %14, align 8
+  %14 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 6
+  store i64 %13, ptr addrspace(1) %14, align 8
   %15 = tail call spir_func i64 @_Z39sub_group_non_uniform_scan_exclusive_orm(i64 0)
-  %16 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 7
-  store i64 %15, i64 addrspace(1)* %16, align 8
+  %16 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 7
+  store i64 %15, ptr addrspace(1) %16, align 8
   %17 = tail call spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_xorm(i64 0)
-  %18 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 8
-  store i64 %17, i64 addrspace(1)* %18, align 8
+  %18 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 8
+  store i64 %17, ptr addrspace(1) %18, align 8
   ret void
 }
 
@@ -1725,36 +1725,36 @@ declare dso_local spir_func i64 @_Z40sub_group_non_uniform_scan_exclusive_xorm(i
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformLogicalXor %[[#bool]] %[[#ScopeSubgroup]] ExclusiveScan %[[#false]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testNonUniformLogical(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testNonUniformLogical(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z40sub_group_non_uniform_reduce_logical_andi(i32 0)
   %r2 = tail call spir_func i1 @__spirv_GroupNonUniformLogicalAnd(i32 3, i32 0, i1 false)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z39sub_group_non_uniform_reduce_logical_ori(i32 0)
   %r3 = tail call spir_func i1 @__spirv_GroupNonUniformLogicalOr(i32 3, i32 0, i1 false)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   %5 = tail call spir_func i32 @_Z40sub_group_non_uniform_reduce_logical_xori(i32 0)
   %r5 = tail call spir_func i1 @__spirv_GroupNonUniformLogicalXor(i32 3, i32 0, i1 false)
-  %6 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 2
-  store i32 %5, i32 addrspace(1)* %6, align 4
+  %6 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 2
+  store i32 %5, ptr addrspace(1) %6, align 4
   %7 = tail call spir_func i32 @_Z48sub_group_non_uniform_scan_inclusive_logical_andi(i32 0)
-  %8 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 3
-  store i32 %7, i32 addrspace(1)* %8, align 4
+  %8 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 3
+  store i32 %7, ptr addrspace(1) %8, align 4
   %9 = tail call spir_func i32 @_Z47sub_group_non_uniform_scan_inclusive_logical_ori(i32 0)
-  %10 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 4
-  store i32 %9, i32 addrspace(1)* %10, align 4
+  %10 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 4
+  store i32 %9, ptr addrspace(1) %10, align 4
   %11 = tail call spir_func i32 @_Z48sub_group_non_uniform_scan_inclusive_logical_xori(i32 0)
-  %12 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 5
-  store i32 %11, i32 addrspace(1)* %12, align 4
+  %12 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 5
+  store i32 %11, ptr addrspace(1) %12, align 4
   %13 = tail call spir_func i32 @_Z48sub_group_non_uniform_scan_exclusive_logical_andi(i32 0)
-  %14 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 6
-  store i32 %13, i32 addrspace(1)* %14, align 4
+  %14 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 6
+  store i32 %13, ptr addrspace(1) %14, align 4
   %15 = tail call spir_func i32 @_Z47sub_group_non_uniform_scan_exclusive_logical_ori(i32 0)
-  %16 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 7
-  store i32 %15, i32 addrspace(1)* %16, align 4
+  %16 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 7
+  store i32 %15, ptr addrspace(1) %16, align 4
   %17 = tail call spir_func i32 @_Z48sub_group_non_uniform_scan_exclusive_logical_xori(i32 0)
-  %18 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 8
-  store i32 %17, i32 addrspace(1)* %18, align 4
+  %18 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 8
+  store i32 %17, ptr addrspace(1) %18, align 4
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_vote.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_vote.ll
index 943f4013fbf93..dc6a5ff60bf17 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_vote.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_vote.ll
@@ -91,10 +91,10 @@
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformElect %[[#bool]] %[[#ScopeSubgroup]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testSubGroupElect(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testSubGroupElect(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z15sub_group_electv()
   %r2 = tail call spir_func i1 @__spirv_GroupNonUniformElect(i32 3)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   ret void
 }
 
@@ -106,10 +106,10 @@ declare dso_local spir_func i1 @__spirv_GroupNonUniformElect(i32)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAll %[[#bool]] %[[#ScopeSubgroup]] %[[#true]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testSubGroupNonUniformAll(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testSubGroupNonUniformAll(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z25sub_group_non_uniform_alli(i32 0)
   %r2 = tail call spir_func i1 @__spirv_GroupNonUniformAll(i32 3, i1 true)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   ret void
 }
 
@@ -121,10 +121,10 @@ declare dso_local spir_func i1 @__spirv_GroupNonUniformAll(i32, i1)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAny %[[#bool]] %[[#ScopeSubgroup]] %[[#true]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testSubGroupNonUniformAny(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testSubGroupNonUniformAny(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z25sub_group_non_uniform_anyi(i32 0)
   %r2 = tail call spir_func i1 @__spirv_GroupNonUniformAny(i32 3, i1 true)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   ret void
 }
 
@@ -146,30 +146,30 @@ declare dso_local spir_func i1 @__spirv_GroupNonUniformAny(i32, i1)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformAllEqual %[[#bool]] %[[#ScopeSubgroup]] %[[#double_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testSubGroupNonUniformAllEqual(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testSubGroupNonUniformAllEqual(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equalc(i8 signext 0)
   %r2 = tail call spir_func i1 @__spirv_GroupNonUniformAllEqual(i32 3, i8 signext 10)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equalh(i8 zeroext 0)
-  store i32 %3, i32 addrspace(1)* %0, align 4
+  store i32 %3, ptr addrspace(1) %0, align 4
   %4 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equals(i16 signext 0)
-  store i32 %4, i32 addrspace(1)* %0, align 4
+  store i32 %4, ptr addrspace(1) %0, align 4
   %5 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equalt(i16 zeroext 0)
-  store i32 %5, i32 addrspace(1)* %0, align 4
+  store i32 %5, ptr addrspace(1) %0, align 4
   %6 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equali(i32 0)
-  store i32 %6, i32 addrspace(1)* %0, align 4
+  store i32 %6, ptr addrspace(1) %0, align 4
   %7 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equalj(i32 0)
-  store i32 %7, i32 addrspace(1)* %0, align 4
+  store i32 %7, ptr addrspace(1) %0, align 4
   %8 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equall(i64 0)
-  store i32 %8, i32 addrspace(1)* %0, align 4
+  store i32 %8, ptr addrspace(1) %0, align 4
   %9 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equalm(i64 0)
-  store i32 %9, i32 addrspace(1)* %0, align 4
+  store i32 %9, ptr addrspace(1) %0, align 4
   %10 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equalf(float 0.000000e+00)
-  store i32 %10, i32 addrspace(1)* %0, align 4
+  store i32 %10, ptr addrspace(1) %0, align 4
   %11 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equalDh(half 0xH0000)
-  store i32 %11, i32 addrspace(1)* %0, align 4
+  store i32 %11, ptr addrspace(1) %0, align 4
   %12 = tail call spir_func i32 @_Z31sub_group_non_uniform_all_equald(double 0.000000e+00)
-  store i32 %12, i32 addrspace(1)* %0, align 4
+  store i32 %12, ptr addrspace(1) %0, align 4
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle.ll
index 013c3030d5568..db386904030e8 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle.ll
@@ -106,14 +106,14 @@
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleXor %[[#char]] %[[#ScopeSubgroup]] %[[#char_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i8 @_Z17sub_group_shufflecj(i8 signext 0, i32 0)
   %r2 = tail call spir_func signext i8 @__spirv_GroupNonUniformShuffle(i32 3, i8 signext 0, i32 0)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func signext i8 @_Z21sub_group_shuffle_xorcj(i8 signext 0, i32 0)
   %r3 = tail call spir_func signext i8 @__spirv_GroupNonUniformShuffleXor(i32 3, i8 signext 0, i32 0)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   ret void
 }
 
@@ -128,12 +128,12 @@ declare dso_local spir_func signext i8 @__spirv_GroupNonUniformShuffleXor(i32, i
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleXor %[[#char]] %[[#ScopeSubgroup]] %[[#char_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleUChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleUChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i8 @_Z17sub_group_shufflehj(i8 zeroext 0, i32 0)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func zeroext i8 @_Z21sub_group_shuffle_xorhj(i8 zeroext 0, i32 0)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   ret void
 }
 
@@ -146,12 +146,12 @@ declare dso_local spir_func zeroext i8 @_Z21sub_group_shuffle_xorhj(i8 zeroext,
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleXor %[[#short]] %[[#ScopeSubgroup]] %[[#short_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i16 @_Z17sub_group_shufflesj(i16 signext 0, i32 0)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func signext i16 @_Z21sub_group_shuffle_xorsj(i16 signext 0, i32 0)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   ret void
 }
 
@@ -164,12 +164,12 @@ declare dso_local spir_func signext i16 @_Z21sub_group_shuffle_xorsj(i16 signext
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleXor %[[#short]] %[[#ScopeSubgroup]] %[[#short_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleUShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleUShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i16 @_Z17sub_group_shuffletj(i16 zeroext 0, i32 0)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func zeroext i16 @_Z21sub_group_shuffle_xortj(i16 zeroext 0, i32 0)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   ret void
 }
 
@@ -182,12 +182,12 @@ declare dso_local spir_func zeroext i16 @_Z21sub_group_shuffle_xortj(i16 zeroext
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleXor %[[#int]] %[[#ScopeSubgroup]] %[[#int_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z17sub_group_shuffleij(i32 0, i32 0)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z21sub_group_shuffle_xorij(i32 0, i32 0)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   ret void
 }
 
@@ -200,12 +200,12 @@ declare dso_local spir_func i32 @_Z21sub_group_shuffle_xorij(i32, i32) local_unn
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleXor %[[#int]] %[[#ScopeSubgroup]] %[[#int_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleUInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleUInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z17sub_group_shufflejj(i32 0, i32 0)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z21sub_group_shuffle_xorjj(i32 0, i32 0)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   ret void
 }
 
@@ -218,12 +218,12 @@ declare dso_local spir_func i32 @_Z21sub_group_shuffle_xorjj(i32, i32) local_unn
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleXor %[[#long]] %[[#ScopeSubgroup]] %[[#long_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleLong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleLong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z17sub_group_shufflelj(i64 0, i32 0)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z21sub_group_shuffle_xorlj(i64 0, i32 0)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   ret void
 }
 
@@ -236,12 +236,12 @@ declare dso_local spir_func i64 @_Z21sub_group_shuffle_xorlj(i64, i32) local_unn
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleXor %[[#long]] %[[#ScopeSubgroup]] %[[#long_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleULong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleULong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z17sub_group_shufflemj(i64 0, i32 0)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z21sub_group_shuffle_xormj(i64 0, i32 0)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   ret void
 }
 
@@ -254,12 +254,12 @@ declare dso_local spir_func i64 @_Z21sub_group_shuffle_xormj(i64, i32) local_unn
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleXor %[[#float]] %[[#ScopeSubgroup]] %[[#float_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleFloat(float addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleFloat(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func float @_Z17sub_group_shufflefj(float 0.000000e+00, i32 0)
-  store float %2, float addrspace(1)* %0, align 4
+  store float %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func float @_Z21sub_group_shuffle_xorfj(float 0.000000e+00, i32 0)
-  %4 = getelementptr inbounds float, float addrspace(1)* %0, i64 1
-  store float %3, float addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds float, ptr addrspace(1) %0, i64 1
+  store float %3, ptr addrspace(1) %4, align 4
   ret void
 }
 
@@ -272,12 +272,12 @@ declare dso_local spir_func float @_Z21sub_group_shuffle_xorfj(float, i32) local
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleXor %[[#half]] %[[#ScopeSubgroup]] %[[#half_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleHalf(half addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleHalf(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func half @_Z17sub_group_shuffleDhj(half 0xH0000, i32 0)
-  store half %2, half addrspace(1)* %0, align 2
+  store half %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func half @_Z21sub_group_shuffle_xorDhj(half 0xH0000, i32 0)
-  %4 = getelementptr inbounds half, half addrspace(1)* %0, i64 1
-  store half %3, half addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds half, ptr addrspace(1) %0, i64 1
+  store half %3, ptr addrspace(1) %4, align 2
   ret void
 }
 
@@ -290,12 +290,12 @@ declare dso_local spir_func half @_Z21sub_group_shuffle_xorDhj(half, i32) local_
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleXor %[[#double]] %[[#ScopeSubgroup]] %[[#double_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleDouble(double addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleDouble(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func double @_Z17sub_group_shuffledj(double 0.000000e+00, i32 0)
-  store double %2, double addrspace(1)* %0, align 8
+  store double %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func double @_Z21sub_group_shuffle_xordj(double 0.000000e+00, i32 0)
-  %4 = getelementptr inbounds double, double addrspace(1)* %0, i64 1
-  store double %3, double addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds double, ptr addrspace(1) %0, i64 1
+  store double %3, ptr addrspace(1) %4, align 8
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle_relative.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle_relative.ll
index fe2c7af43c1b6..588211fdc30b8 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle_relative.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_shuffle_relative.ll
@@ -108,14 +108,14 @@
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#char]] %[[#ScopeSubgroup]] %[[#char_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleRelativeChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleRelativeChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i8 @_Z20sub_group_shuffle_upcj(i8 signext 0, i32 0)
   %w2 = tail call spir_func i8 @__spirv_GroupNonUniformShuffleUp(i32 3, i8 signext 0, i32 0)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func signext i8 @_Z22sub_group_shuffle_downcj(i8 signext 0, i32 0)
   %w3 = tail call spir_func i8 @__spirv_GroupNonUniformShuffleDown(i32 3, i8 signext 0, i32 0)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   ret void
 }
 
@@ -132,12 +132,12 @@ declare dso_local spir_func i8 @__spirv_GroupNonUniformShuffleDown(i32, i8, i32)
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#char]] %[[#ScopeSubgroup]] %[[#char_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleRelativeUChar(i8 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleRelativeUChar(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i8 @_Z20sub_group_shuffle_uphj(i8 zeroext 0, i32 0)
-  store i8 %2, i8 addrspace(1)* %0, align 1
+  store i8 %2, ptr addrspace(1) %0, align 1
   %3 = tail call spir_func zeroext i8 @_Z22sub_group_shuffle_downhj(i8 zeroext 0, i32 0)
-  %4 = getelementptr inbounds i8, i8 addrspace(1)* %0, i64 1
-  store i8 %3, i8 addrspace(1)* %4, align 1
+  %4 = getelementptr inbounds i8, ptr addrspace(1) %0, i64 1
+  store i8 %3, ptr addrspace(1) %4, align 1
   ret void
 }
 
@@ -150,12 +150,12 @@ declare dso_local spir_func zeroext i8 @_Z22sub_group_shuffle_downhj(i8 zeroext,
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#short]] %[[#ScopeSubgroup]] %[[#short_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleRelativeShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleRelativeShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func signext i16 @_Z20sub_group_shuffle_upsj(i16 signext 0, i32 0)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func signext i16 @_Z22sub_group_shuffle_downsj(i16 signext 0, i32 0)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   ret void
 }
 
@@ -168,12 +168,12 @@ declare dso_local spir_func signext i16 @_Z22sub_group_shuffle_downsj(i16 signex
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#short]] %[[#ScopeSubgroup]] %[[#short_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleRelativeUShort(i16 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleRelativeUShort(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func zeroext i16 @_Z20sub_group_shuffle_uptj(i16 zeroext 0, i32 0)
-  store i16 %2, i16 addrspace(1)* %0, align 2
+  store i16 %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func zeroext i16 @_Z22sub_group_shuffle_downtj(i16 zeroext 0, i32 0)
-  %4 = getelementptr inbounds i16, i16 addrspace(1)* %0, i64 1
-  store i16 %3, i16 addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds i16, ptr addrspace(1) %0, i64 1
+  store i16 %3, ptr addrspace(1) %4, align 2
   ret void
 }
 
@@ -186,12 +186,12 @@ declare dso_local spir_func zeroext i16 @_Z22sub_group_shuffle_downtj(i16 zeroex
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#int]] %[[#ScopeSubgroup]] %[[#int_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleRelativeInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleRelativeInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z20sub_group_shuffle_upij(i32 0, i32 0)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z22sub_group_shuffle_downij(i32 0, i32 0)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   ret void
 }
 
@@ -204,12 +204,12 @@ declare dso_local spir_func i32 @_Z22sub_group_shuffle_downij(i32, i32) local_un
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#int]] %[[#ScopeSubgroup]] %[[#int_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleRelativeUInt(i32 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleRelativeUInt(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i32 @_Z20sub_group_shuffle_upjj(i32 0, i32 0)
-  store i32 %2, i32 addrspace(1)* %0, align 4
+  store i32 %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func i32 @_Z22sub_group_shuffle_downjj(i32 0, i32 0)
-  %4 = getelementptr inbounds i32, i32 addrspace(1)* %0, i64 1
-  store i32 %3, i32 addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds i32, ptr addrspace(1) %0, i64 1
+  store i32 %3, ptr addrspace(1) %4, align 4
   ret void
 }
 
@@ -222,12 +222,12 @@ declare dso_local spir_func i32 @_Z22sub_group_shuffle_downjj(i32, i32) local_un
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#long]] %[[#ScopeSubgroup]] %[[#long_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleRelativeLong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleRelativeLong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z20sub_group_shuffle_uplj(i64 0, i32 0)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z22sub_group_shuffle_downlj(i64 0, i32 0)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   ret void
 }
 
@@ -240,12 +240,12 @@ declare dso_local spir_func i64 @_Z22sub_group_shuffle_downlj(i64, i32) local_un
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#long]] %[[#ScopeSubgroup]] %[[#long_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleRelativeULong(i64 addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleRelativeULong(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func i64 @_Z20sub_group_shuffle_upmj(i64 0, i32 0)
-  store i64 %2, i64 addrspace(1)* %0, align 8
+  store i64 %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func i64 @_Z22sub_group_shuffle_downmj(i64 0, i32 0)
-  %4 = getelementptr inbounds i64, i64 addrspace(1)* %0, i64 1
-  store i64 %3, i64 addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds i64, ptr addrspace(1) %0, i64 1
+  store i64 %3, ptr addrspace(1) %4, align 8
   ret void
 }
 
@@ -258,12 +258,12 @@ declare dso_local spir_func i64 @_Z22sub_group_shuffle_downmj(i64, i32) local_un
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#float]] %[[#ScopeSubgroup]] %[[#float_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleRelativeFloat(float addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleRelativeFloat(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func float @_Z20sub_group_shuffle_upfj(float 0.000000e+00, i32 0)
-  store float %2, float addrspace(1)* %0, align 4
+  store float %2, ptr addrspace(1) %0, align 4
   %3 = tail call spir_func float @_Z22sub_group_shuffle_downfj(float 0.000000e+00, i32 0)
-  %4 = getelementptr inbounds float, float addrspace(1)* %0, i64 1
-  store float %3, float addrspace(1)* %4, align 4
+  %4 = getelementptr inbounds float, ptr addrspace(1) %0, i64 1
+  store float %3, ptr addrspace(1) %4, align 4
   ret void
 }
 
@@ -276,12 +276,12 @@ declare dso_local spir_func float @_Z22sub_group_shuffle_downfj(float, i32) loca
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#half]] %[[#ScopeSubgroup]] %[[#half_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleRelativeHalf(half addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleRelativeHalf(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func half @_Z20sub_group_shuffle_upDhj(half 0xH0000, i32 0)
-  store half %2, half addrspace(1)* %0, align 2
+  store half %2, ptr addrspace(1) %0, align 2
   %3 = tail call spir_func half @_Z22sub_group_shuffle_downDhj(half 0xH0000, i32 0)
-  %4 = getelementptr inbounds half, half addrspace(1)* %0, i64 1
-  store half %3, half addrspace(1)* %4, align 2
+  %4 = getelementptr inbounds half, ptr addrspace(1) %0, i64 1
+  store half %3, ptr addrspace(1) %4, align 2
   ret void
 }
 
@@ -294,12 +294,12 @@ declare dso_local spir_func half @_Z22sub_group_shuffle_downDhj(half, i32) local
 ; CHECK-SPIRV: %[[#]] = OpGroupNonUniformShuffleDown %[[#double]] %[[#ScopeSubgroup]] %[[#double_0]] %[[#int_0]]
 ; CHECK-SPIRV: OpFunctionEnd
 
-define dso_local spir_kernel void @testShuffleRelativeDouble(double addrspace(1)* nocapture) local_unnamed_addr {
+define dso_local spir_kernel void @testShuffleRelativeDouble(ptr addrspace(1) nocapture) local_unnamed_addr {
   %2 = tail call spir_func double @_Z20sub_group_shuffle_updj(double 0.000000e+00, i32 0)
-  store double %2, double addrspace(1)* %0, align 8
+  store double %2, ptr addrspace(1) %0, align 8
   %3 = tail call spir_func double @_Z22sub_group_shuffle_downdj(double 0.000000e+00, i32 0)
-  %4 = getelementptr inbounds double, double addrspace(1)* %0, i64 1
-  store double %3, double addrspace(1)* %4, align 8
+  %4 = getelementptr inbounds double, ptr addrspace(1) %0, i64 1
+  store double %3, ptr addrspace(1) %4, align 8
   ret void
 }
 
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/vec8.ll b/llvm/test/CodeGen/SPIRV/transcoding/vec8.ll
index e055e0a1231c4..e1586af3d59a7 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/vec8.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/vec8.ll
@@ -10,6 +10,6 @@
 
 define spir_kernel void @test(<8 x i32> %v) {
   %1 = alloca <8 x i32>, align 32
-  store <8 x i32> %v, <8 x i32>* %1, align 32
+  store <8 x i32> %v, ptr %1, align 32
   ret void
 }
diff --git a/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll b/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll
index aed759ba843c3..714dd1185612f 100644
--- a/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll
+++ b/llvm/test/CodeGen/SPIRV/uitofp-with-bool.ll
@@ -75,7 +75,7 @@
 ; SPV-DAG: %[[#i1s:]] = OpFunctionParameter %[[#]]
 ; SPV-DAG: %[[#i1v:]] = OpFunctionParameter %[[#]]
 
-define dso_local spir_kernel void @K(float addrspace(1)* nocapture %A, i32 %B, i1 %i1s, <2 x i1> %i1v) local_unnamed_addr {
+define dso_local spir_kernel void @K(ptr addrspace(1) nocapture %A, i32 %B, i1 %i1s, <2 x i1> %i1v) local_unnamed_addr {
 entry:
 
 ; SPV: %[[#cmp_res:]] = OpSGreaterThan %[[#bool]] %[[#B]] %[[#zero_32]]
@@ -84,7 +84,7 @@ entry:
 ; SPV: %[[#utof_res:]] = OpConvertUToF %[[#float]] %[[#select_res]]
   %conv = uitofp i1 %cmp to float
 ; SPV: OpStore %[[#A]] %[[#utof_res]]
-  store float %conv, float addrspace(1)* %A, align 4;
+  store float %conv, ptr addrspace(1) %A, align 4;
 
 ; SPV: %[[#s1:]] = OpSelect %[[#int_8]] %[[#i1s]] %[[#mone_8]] %[[#zero_8]]
   %s1 = sext i1 %i1s to i8
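
For readers skimming the hunks above: every change in this patch is the same mechanical rewrite from typed pointers to opaque pointers. A minimal sketch of the pattern, with an illustrative function and value names that are not taken from the patch itself:

define void @example(ptr addrspace(1) %p) {
  ; Before the migration this parameter would have been written
  ; "i32 addrspace(1)* %p". With opaque pointers the pointee type is
  ; dropped from the pointer type and survives only as the explicit
  ; value type operand on getelementptr (and on load/store).
  %q = getelementptr inbounds i32, ptr addrspace(1) %p, i64 1
  store i32 0, ptr addrspace(1) %q, align 4
  ret void
}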


